author      Wey-Yi Guy <wey-yi.w.guy@intel.com>   2011-02-21 22:27:26 +0300
committer   Wey-Yi Guy <wey-yi.w.guy@intel.com>   2011-02-21 22:27:26 +0300
commit      be663ab67077fac8e23eb8e231a8c1c94cb32e54 (patch)
tree        c1d80a72f86be20135d3e57178e99b9d855f622f /drivers/net/wireless/iwlegacy
parent      4bc85c1324aaa4a8bb0171e332ff762b6230bdfe (diff)
download    linux-be663ab67077fac8e23eb8e231a8c1c94cb32e54.tar.xz
iwlwifi: split the drivers for agn and legacy devices 3945/4965
Intel WiFi devices 3945 and 4965 now have their own driver in the folder
drivers/net/wireless/iwlegacy
Add support to build these drivers independently of the driver for
AGN devices. Selecting the 3945 builds iwl3945.ko and iwl-legacy.ko,
and selecting the 4965 builds iwl4965.ko and iwl-legacy.ko; iwl-legacy.ko
contains the code shared between the two devices.
The 3945 is an ABG/BG device, with no support for 802.11n. The 4965 is a 2x3
ABGN device.
Signed-off-by: Meenakshi Venkataraman <meenakshi.venkataraman@intel.com>
Acked-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlegacy')
56 files changed, 42384 insertions, 0 deletions
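To make the build relationship above concrete before the diff itself, here is a minimal sketch of how the new options might be enabled. The config symbols and module names come from the Kconfig and Makefile added by this patch; the .config values and the modprobe behaviour shown are illustrative assumptions about a typical modular build, not part of the patch.

  # Assumed .config fragment for a modular build
  CONFIG_IWLWIFI_LEGACY=m   # shared core  -> iwl-legacy.ko
  CONFIG_IWL3945=m          # 3945 driver  -> iwl3945.ko
  CONFIG_IWL4965=m          # 4965 driver  -> iwl4965.ko

  # Loading either device module is expected to pull in the shared core first
  # through ordinary module dependencies (assumption, not stated in the patch):
  #   modprobe iwl3945      # loads iwl-legacy.ko, then iwl3945.ko
  #   modprobe iwl4965      # loads iwl-legacy.ko, then iwl4965.ko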
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig new file mode 100644 index 000000000000..2a45dd44cc12 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/Kconfig @@ -0,0 +1,116 @@ +config IWLWIFI_LEGACY + tristate "Intel Wireless Wifi legacy devices" + depends on PCI && MAC80211 + select FW_LOADER + select NEW_LEDS + select LEDS_CLASS + select LEDS_TRIGGERS + select MAC80211_LEDS + +menu "Debugging Options" + depends on IWLWIFI_LEGACY + +config IWLWIFI_LEGACY_DEBUG + bool "Enable full debugging output in 4965 and 3945 drivers" + depends on IWLWIFI_LEGACY + ---help--- + This option will enable debug tracing output for the iwlwifilegacy + drivers. + + This will result in the kernel module being ~100k larger. You can + control which debug output is sent to the kernel log by setting the + value in + + /sys/class/net/wlan0/device/debug_level + + This entry will only exist if this option is enabled. + + To set a value, simply echo an 8-byte hex value to the same file: + + % echo 0x43fff > /sys/class/net/wlan0/device/debug_level + + You can find the list of debug mask values in: + drivers/net/wireless/iwlwifilegacy/iwl-debug.h + + If this is your first time using this driver, you should say Y here + as the debug information can assist others in helping you resolve + any problems you may encounter. + +config IWLWIFI_LEGACY_DEBUGFS + bool "4965 and 3945 debugfs support" + depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS + ---help--- + Enable creation of debugfs files for the iwlwifilegacy drivers. This + is a low-impact option that allows getting insight into the + driver's state at runtime. + +config IWLWIFI_LEGACY_DEVICE_TRACING + bool "iwlwifilegacy legacy device access tracing" + depends on IWLWIFI_LEGACY + depends on EVENT_TRACING + help + Say Y here to trace all commands, including TX frames and IO + accesses, sent to the device. If you say yes, iwlwifilegacy will + register with the ftrace framework for event tracing and dump + all this information to the ringbuffer, you may need to + increase the ringbuffer size. See the ftrace documentation + for more information. + + When tracing is not enabled, this option still has some + (though rather small) overhead. + + If unsure, say Y so we can help you better when problems + occur. +endmenu + +config IWL4965 + tristate "Intel Wireless WiFi 4965AGN (iwl4965)" + depends on IWLWIFI_LEGACY + ---help--- + This option enables support for + + Select to build the driver supporting the: + + Intel Wireless WiFi Link 4965AGN + + This driver uses the kernel's mac80211 subsystem. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + The microcode is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The + module will be called iwl4965. + +config IWL3945 + tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" + depends on IWLWIFI_LEGACY + ---help--- + Select to build the driver supporting the: + + Intel PRO/Wireless 3945ABG/BG Network Connection + + This driver uses the kernel's mac80211 subsystem. 
+ + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + The microcode is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The + module will be called iwl3945. diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile new file mode 100644 index 000000000000..d56aeb38c211 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/Makefile @@ -0,0 +1,25 @@ +obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o +iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o +iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o +iwl-legacy-objs += iwl-scan.o iwl-led.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o + +iwl-legacy-objs += $(iwl-legacy-m) + +CFLAGS_iwl-devtrace.o := -I$(src) + +# 4965 +obj-$(CONFIG_IWL4965) += iwl4965.o +iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o +iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o +iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o +iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o +iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o + +# 3945 +obj-$(CONFIG_IWL3945) += iwl3945.o +iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o +iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c new file mode 100644 index 000000000000..cfabb38793ab --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c @@ -0,0 +1,523 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-3945-debugfs.h" + + +static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", + le32_to_cpu(priv->_3945.statistics.flag)); + if (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (le32_to_cpu(priv->_3945.statistics.flag) & + UCODE_STATISTICS_NARROW_BAND_MSK) + ? "enabled" : "disabled"); + return p; +} + +ssize_t iwl3945_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + + sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; + ssize_t ret; + struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, + *max_ofdm; + struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct iwl39_statistics_rx_non_phy *general, *accum_general; + struct iwl39_statistics_rx_non_phy *delta_general, *max_general; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->_3945.statistics.rx.ofdm; + cck = &priv->_3945.statistics.rx.cck; + general = &priv->_3945.statistics.rx.general; + accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm; + accum_cck = &priv->_3945.accum_statistics.rx.cck; + accum_general = &priv->_3945.accum_statistics.rx.general; + delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm; + delta_cck = &priv->_3945.delta_statistics.rx.cck; + delta_general = &priv->_3945.delta_statistics.rx.general; + max_ofdm = &priv->_3945.max_delta.rx.ofdm; + max_cck = &priv->_3945.max_delta.rx.cck; + max_general = &priv->_3945.max_delta.rx.general; + + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + 
max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_good:", le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, + delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, + delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, + delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, + delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, + delta_cck->overrun_err, max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + 
max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, + max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, + delta_cck->sfd_timeout, max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, + delta_cck->fina_timeout, max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, + delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, + delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, + delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, + delta_general->bogus_cts, max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, + delta_general->bogus_ack, max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl3945_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + 
int pos = 0; + char *buf; + int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250; + ssize_t ret; + struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->_3945.statistics.tx; + accum_tx = &priv->_3945.accum_statistics.tx; + delta_tx = &priv->_3945.delta_statistics.tx; + max_tx = &priv->_3945.max_delta.tx; + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300; + ssize_t ret; + struct iwl39_statistics_general *general, *accum_general; + struct iwl39_statistics_general *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return 
-ENOMEM; + } + + /* + * The statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general = &priv->_3945.statistics.general; + dbg = &priv->_3945.statistics.general.dbg; + div = &priv->_3945.statistics.general.div; + accum_general = &priv->_3945.accum_statistics.general; + delta_general = &priv->_3945.delta_statistics.general; + max_general = &priv->_3945.max_delta.general; + accum_dbg = &priv->_3945.accum_statistics.general.dbg; + delta_dbg = &priv->_3945.delta_statistics.general.dbg; + max_dbg = &priv->_3945.max_delta.general.dbg; + accum_div = &priv->_3945.accum_statistics.general.div; + delta_div = &priv->_3945.delta_statistics.general.div; + max_div = &priv->_3945.max_delta.general.div; + pos += iwl3945_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" + "acumulative delta max\n", + "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + " %-30s %10u %10u %10u %10u\n", + "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h new file mode 100644 index 000000000000..8fef4b32b447 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h @@ -0,0 +1,60 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos); +#else +static ssize_t iwl3945_ucode_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + return 0; +} +static ssize_t iwl3945_ucode_tx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + return 0; +} +static ssize_t iwl3945_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h new file mode 100644 index 000000000000..836c9919f82e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h @@ -0,0 +1,187 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_3945_fh_h__ +#define __iwl_3945_fh_h__ + +/************************************/ +/* iwl3945 Flow Handler Definitions */ +/************************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. 
+ */ +#define FH39_MEM_LOWER_BOUND (0x0800) +#define FH39_MEM_UPPER_BOUND (0x1000) + +#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140) +#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180) +#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400) +#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0) +#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500) +#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \ + ((_ch) * 2 + (buf)) * 0x28) +#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch)) + +/* CBCC channel is [0,2] */ +#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8) +#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00) +#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04) + +/* RCSR channel is [0,2] */ +#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40) +#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00) +#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04) +#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20) +#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24) + +#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0)) + +/* RSSR */ +#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000) +#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004) + +/* TCSR */ +#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20) +#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00) +#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04) +#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08) + +/* TSSR */ +#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000) +#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008) +#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010) + + +/* DBM */ + +#define FH39_SRVC_CHNL (6) + +#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24) +#define 
FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16) + +#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \ + (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \ + FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)) + +#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +struct iwl3945_tfd_tb { + __le32 addr; + __le32 len; +} __packed; + +struct iwl3945_tfd { + __le32 control_flags; + struct iwl3945_tfd_tb tbs[4]; + u8 __pad[28]; +} __packed; + + +#endif /* __iwl_3945_fh_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h new file mode 100644 index 000000000000..779d3cb86e2c --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h @@ -0,0 +1,293 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-3945.h for driver implementation definitions. + */ + +#ifndef __iwl_3945_hw__ +#define __iwl_3945_hw__ + +#include "iwl-eeprom.h" + +/* RSSI to dBm */ +#define IWL39_RSSI_OFFSET 95 + +#define IWL_DEFAULT_TX_POWER 0x0F + +/* + * EEPROM related constants, enums, and structures. + */ +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table index. + * One for each of 5 "sample" power levels in each band. + * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_sample { + u8 gain_index; /* index into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __packed; + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl3945_eeprom_txpower_group { + struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __packed; + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. 
+ */ +struct iwl3945_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __packed; + +/* + * EEPROM map + */ +struct iwl3945_eeprom { + u8 reserved0[16]; + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; + u16 version; /* abs.ofs: 136 */ + u8 sku_cap; /* abs.ofs: 138 */ + u8 leds_mode; /* abs.ofs: 139 */ + u16 oem_mode; + u16 wowlan_mode; /* abs.ofs: 142 */ + u16 leds_time_interval; /* abs.ofs: 144 */ + u8 leds_off_time; /* abs.ofs: 146 */ + u8 leds_on_time; /* abs.ofs: 147 */ + u8 almgor_m_version; /* abs.ofs: 148 */ + u8 antenna_switch_type; /* abs.ofs: 149 */ + u8 reserved6[42]; + u8 sku_id[4]; /* abs.ofs: 192 */ + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by 3945 has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ + u16 band_1_count; /* abs.ofs: 196 */ + struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ + u16 band_2_count; /* abs.ofs: 226 */ + struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ + u16 band_3_count; /* abs.ofs: 254 */ + struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ + u16 band_4_count; /* abs.ofs: 280 */ + struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ + u16 band_5_count; /* abs.ofs: 304 */ + struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + + u8 reserved9[194]; + +/* + * 3945 Txpower calibration data. + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 + struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ + struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ +} __packed; + +#define IWL3945_EEPROM_IMG_SIZE 1024 + +/* End of EEPROM */ + +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ +#define IWL39_NUM_QUEUES 5 +#define IWL39_CMD_QUEUE_NUM 4 + +#define IWL_DEFAULT_TX_RETRY 15 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +#define U32_PAD(n) ((4-(n))&0x3) + +#define TFD_CTL_COUNT_SET(n) (n << 24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7) +#define TFD_CTL_PAD_SET(n) (n << 28) +#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. 
*/ +#define IWL39_RTC_INST_LOWER_BOUND (0x000000) +#define IWL39_RTC_INST_UPPER_BOUND (0x014000) + +#define IWL39_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL39_RTC_DATA_UPPER_BOUND (0x808000) + +#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \ + IWL39_RTC_INST_LOWER_BOUND) +#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \ + IWL39_RTC_DATA_LOWER_BOUND) + +#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE +#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE + +static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL39_RTC_DATA_LOWER_BOUND) && + (addr < IWL39_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE + * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ +struct iwl3945_shared { + __le32 tx_base_ptr[8]; +} __packed; + +static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags) & 0xFF; +} + +static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags); +} + +static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le16((u16)rate|flags); +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c new file mode 100644 index 000000000000..abd923558d48 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c @@ -0,0 +1,64 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-dev.h" +#include "iwl-3945-led.h" + + +/* Send led command */ +static int iwl3945_send_led_cmd(struct iwl_priv *priv, + struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + + return iwl_legacy_send_cmd(priv, &cmd); +} + +const struct iwl_led_ops iwl3945_led_ops = { + .cmd = iwl3945_send_led_cmd, +}; diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h new file mode 100644 index 000000000000..96716276eb0d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h @@ -0,0 +1,32 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_led_h__ +#define __iwl_3945_led_h__ + +extern const struct iwl_led_ops iwl3945_led_ops; + +#endif /* __iwl_3945_led_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c new file mode 100644 index 000000000000..4fabc5439858 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -0,0 +1,994 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/wireless.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include "iwl-commands.h" +#include "iwl-3945.h" +#include "iwl-sta.h" + +#define RS_NAME "iwl-3945-rs" + +static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 +}; + +static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 +}; + +static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 +}; + +static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = { + 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +struct iwl3945_tpt_entry { + s8 min_rssi; + u8 index; +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-72, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-87, IWL_RATE_9M_INDEX}, + {-89, IWL_RATE_6M_INDEX} +}; + +static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-68, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} +}; + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_FLUSH (3*HZ) +#define IWL_RATE_WIN_FLUSH (HZ/2) +#define IWL39_RATE_HIGH_TH 11520 +#define IWL_SUCCESS_UP_TH 8960 +#define IWL_SUCCESS_DOWN_TH 10880 +#define IWL_RATE_MIN_FAILURE_TH 6 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 +#define IWL_RATE_RETRY_TH 15 + +static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band) +{ + u32 index = 0; + u32 table_size = 0; + struct iwl3945_tpt_entry *tpt_table = NULL; + + if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) + rssi = IWL_MIN_RSSI_VAL; + + switch (band) { + case IEEE80211_BAND_2GHZ: + tpt_table = iwl3945_tpt_table_g; + table_size = ARRAY_SIZE(iwl3945_tpt_table_g); + break; + + case IEEE80211_BAND_5GHZ: + tpt_table = iwl3945_tpt_table_a; + table_size = ARRAY_SIZE(iwl3945_tpt_table_a); + break; + + default: + BUG(); + break; + } + + while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) + index++; + + index = min(index, (table_size - 1)); + + return tpt_table[index].index; +} + +static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = -1; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +/** + * iwl3945_rate_scale_flush_windows - flush out the rate scale windows + * + * Returns the number of windows that have 
gathered data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. + */ +static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta) +{ + int unflushed = 0; + int i; + unsigned long flags; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than IWL_RATE_WIN_FLUSH + * since we flushed, clear out the gathered statistics + */ + for (i = 0; i < IWL_RATE_COUNT_3945; i++) { + if (!rs_sta->win[i].counter) + continue; + + spin_lock_irqsave(&rs_sta->lock, flags); + if (time_after(jiffies, rs_sta->win[i].stamp + + IWL_RATE_WIN_FLUSH)) { + IWL_DEBUG_RATE(priv, "flushing %d samples of rate " + "index %d\n", + rs_sta->win[i].counter, i); + iwl3945_clear_window(&rs_sta->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_sta->lock, flags); + } + + return unflushed; +} + +#define IWL_RATE_FLUSH_MAX 5000 /* msec */ +#define IWL_RATE_FLUSH_MIN 50 /* msec */ +#define IWL_AVERAGE_PACKETS 1500 + +static void iwl3945_bg_rate_scale_flush(unsigned long data) +{ + struct iwl3945_rs_sta *rs_sta = (void *)data; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + IWL_DEBUG_RATE(priv, "enter\n"); + + unflushed = iwl3945_rate_scale_flush_windows(rs_sta); + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; + + rs_sta->last_tx_packets = rs_sta->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); + + IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n", + packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = (IWL_AVERAGE_PACKETS * 1000) / pps; + if (duration < IWL_RATE_FLUSH_MIN) + duration = IWL_RATE_FLUSH_MIN; + else if (duration > IWL_RATE_FLUSH_MAX) + duration = IWL_RATE_FLUSH_MAX; + } else + duration = IWL_RATE_FLUSH_MAX; + + rs_sta->flush_time = msecs_to_jiffies(duration); + + IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n", + duration, packet_count); + + mod_timer(&rs_sta->rate_scale_flush, jiffies + + rs_sta->flush_time); + + rs_sta->last_partial_flush = jiffies; + } else { + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->flush_pending = 0; + } + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_sta->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +/** + * iwl3945_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 64 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta, + struct iwl3945_rate_scale_data *window, + int success, int retries, int index) +{ + unsigned long flags; + s32 fail_count; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + if (!retries) { + IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n"); + return; + } + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. 
+ * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + * */ + while (retries > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) { + window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1)); + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame (throw away oldest history), + * OR in "1", and increment "success" if this + * frame was successful. */ + window->data <<= 1; + if (success > 0) { + window->success_counter++; + window->data |= 0x1; + success--; + } + + retries--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = ((window->success_ratio * + rs_sta->expected_tpt[index] + 64) / 128); + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct iwl3945_sta_priv *psta; + struct iwl3945_rs_sta *rs_sta; + struct ieee80211_supported_band *sband; + int i; + + IWL_DEBUG_INFO(priv, "enter\n"); + if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) + goto out; + + psta = (struct iwl3945_sta_priv *) sta->drv_priv; + rs_sta = &psta->rs_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + rs_sta->priv = priv; + + rs_sta->start_rate = IWL_RATE_INVALID; + + /* default to just 802.11b */ + rs_sta->expected_tpt = iwl3945_expected_tpt_b; + + rs_sta->last_partial_flush = jiffies; + rs_sta->last_flush = jiffies; + rs_sta->flush_time = IWL_RATE_FLUSH; + rs_sta->last_tx_packets = 0; + + rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; + rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; + + for (i = 0; i < IWL_RATE_COUNT_3945; i++) + iwl3945_clear_window(&rs_sta->win[i]); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
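+ * For now the loop below simply seeds last_txrate_idx with the highest + * rate present in the station's supported-rate bitmap.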
*/ + + for (i = sband->n_bitrates - 1; i >= 0; i--) { + if (sta->supp_rates[sband->band] & (1 << i)) { + rs_sta->last_txrate_idx = i; + break; + } + } + + priv->_3945.sta_supp_rates = sta->supp_rates[sband->band]; + /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ + if (sband->band == IEEE80211_BAND_5GHZ) { + rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates << + IWL_FIRST_OFDM_RATE; + } + +out: + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + + IWL_DEBUG_INFO(priv, "leave\n"); +} + +static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} + +/* rate scale requires free function to be implemented */ +static void iwl3945_rs_free(void *priv) +{ + return; +} + +static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) +{ + struct iwl3945_rs_sta *rs_sta; + struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; + struct iwl_priv *priv __maybe_unused = iwl_priv; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rs_sta = &psta->rs_sta; + + spin_lock_init(&rs_sta->lock); + init_timer(&rs_sta->rate_scale_flush); + + IWL_DEBUG_RATE(priv, "leave\n"); + + return rs_sta; +} + +static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl3945_rs_sta *rs_sta = priv_sta; + + /* + * Be careful not to use any members of iwl3945_rs_sta (like trying + * to use iwl_priv to print out debugging) since it may not be fully + * initialized at this point. + */ + del_timer_sync(&rs_sta->rate_scale_flush); +} + + +/** + * iwl3945_rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. + */ +static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + s8 retries = 0, current_count; + int scale_rate_index, first_index, last_index; + unsigned long flags; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + IWL_DEBUG_RATE(priv, "enter\n"); + + retries = info->status.rates[0].count; + /* Sanity Check for retries */ + if (retries > IWL_RATE_RETRY_TH) + retries = IWL_RATE_RETRY_TH; + + first_index = sband->bitrates[info->status.rates[0].idx].hw_value; + if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) { + IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index); + return; + } + + if (!priv_sta) { + IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n"); + return; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!rs_sta->priv) { + IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n"); + return; + } + + + rs_sta->tx_packets++; + + scale_rate_index = first_index; + last_index = first_index; + + /* + * Update the window for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * priv value 'retry_rate' vs. 
rate specific + * + * On exit from this while loop last_index indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 1) { + if ((retries - 1) < priv->retry_rate) { + current_count = (retries - 1); + last_index = scale_rate_index; + } else { + current_count = priv->retry_rate; + last_index = iwl3945_rs_next_rate(priv, + scale_rate_index); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[scale_rate_index], + 0, current_count, scale_rate_index); + IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n", + scale_rate_index, current_count); + + retries -= current_count; + + scale_rate_index = last_index; + } + + + /* Update the last index window with success/failure based on ACK */ + IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n", + last_index, + (info->flags & IEEE80211_TX_STAT_ACK) ? + "success" : "failure"); + iwl3945_collect_tx_data(rs_sta, + &rs_sta->win[last_index], + info->flags & IEEE80211_TX_STAT_ACK, 1, last_index); + + /* We updated the rate scale window -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_sta->lock, flags); + + if (!rs_sta->flush_pending && + time_after(jiffies, rs_sta->last_flush + + rs_sta->flush_time)) { + + rs_sta->last_partial_flush = jiffies; + rs_sta->flush_pending = 1; + mod_timer(&rs_sta->rate_scale_flush, + jiffies + rs_sta->flush_time); + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "leave\n"); +} + +static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, + u8 index, u16 rate_mask, enum ieee80211_band band) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + struct iwl_priv *priv __maybe_unused = rs_sta->priv; + + /* 802.11A walks to the next literal adjacent rate in + * the rate table */ + if (unlikely(band == IEEE80211_BAND_5GHZ)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT_3945; + i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + if (rs_sta->tgg) + low = iwl3945_rates[low].prev_rs_tgg; + else + low = iwl3945_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + if (rs_sta->tgg) + high = iwl3945_rates[high].next_rs_tgg; + else + high = iwl3945_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * iwl3945_rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
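+ * (That single table is iwl3945_rates[], whose IWL_RATE_COUNT_3945 entries + * cover both the CCK and the OFDM rates.)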
+ * + * As such, we can't convert the index obtained below into the hw_mode's + * rate table and must reference the driver allocated rate table + * + */ +static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta, struct ieee80211_tx_rate_control *txrc) +{ + struct ieee80211_supported_band *sband = txrc->sband; + struct sk_buff *skb = txrc->skb; + u8 low = IWL_RATE_INVALID; + u8 high = IWL_RATE_INVALID; + u16 high_low; + int index; + struct iwl3945_rs_sta *rs_sta = priv_sta; + struct iwl3945_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + unsigned long flags; + u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; + s8 max_rate_idx = -1; + struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + IWL_DEBUG_RATE(priv, "enter\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (rs_sta && !rs_sta->priv) { + IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n"); + priv_sta = NULL; + } + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_mask = sta->supp_rates[sband->band]; + + /* get user max rate if set */ + max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1)) + max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT)) + max_rate_idx = -1; + + index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1); + + if (sband->band == IEEE80211_BAND_5GHZ) + rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; + + spin_lock_irqsave(&rs_sta->lock, flags); + + /* for a recent assoc, choose the best rate according + * to the rssi value + */ + if (rs_sta->start_rate != IWL_RATE_INVALID) { + if (rs_sta->start_rate < index && + (rate_mask & (1 << rs_sta->start_rate))) + index = rs_sta->start_rate; + rs_sta->start_rate = IWL_RATE_INVALID; + } + + /* force user max rate if set by user */ + if ((max_rate_idx != -1) && (max_rate_idx < index)) { + if (rate_mask & (1 << max_rate_idx)) + index = max_rate_idx; + } + + window = &(rs_sta->win[index]); + + fail_count = window->counter - window->success_counter; + + if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { + spin_unlock_irqrestore(&rs_sta->lock, flags); + + IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: " + "counter: %d, success_counter: %d, " + "expected_tpt is %sNULL\n", + index, + window->counter, + window->success_counter, + rs_sta->expected_tpt ? "not " : ""); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + goto out; + + } + + current_tpt = window->average_tpt; + + high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, + sband->band); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, don't allow higher than the user constraint */ + if ((max_rate_idx != -1) && (max_rate_idx < high)) + high = IWL_RATE_INVALID; + + /* Collect measured throughputs of adjacent rates */ + if (low != IWL_RATE_INVALID) + low_tpt = rs_sta->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = rs_sta->win[high].average_tpt; + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + scale_action = 0; + + /* Low success ratio, need to drop the rate */ + if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { + IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n"); + scale_action = -1; + /* No throughput measured yet for adjacent rates, + * try increase */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + + /* Both adjacent throughputs are measured, but neither one has + * better throughput; we're using the best rate, don't change + * it! */ + } else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && (high_tpt < current_tpt)) { + + IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < " + "current_tpt [%d]\n", + low_tpt, high_tpt, current_tpt); + scale_action = 0; + + /* At least one of the rates has better throughput */ + } else { + if (high_tpt != IWL_INVALID_VALUE) { + + /* High rate has better throughput, increase + * rate */ + if (high_tpt > current_tpt && + window->success_ratio >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else { + IWL_DEBUG_RATE(priv, + "decrease rate because of high tpt\n"); + scale_action = 0; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) { + /* Lower rate is not better and the success + * ratio is good enough, so try a higher rate */ + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it.
*/ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * rs_sta->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + + /* Decrease rate */ + if (low != IWL_RATE_INVALID) + index = low; + break; + + case 1: + /* Increase rate */ + if (high != IWL_RATE_INVALID) + index = high; + + break; + + case 0: + default: + /* No change */ + break; + } + + IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n", + index, scale_action, low, high); + + out: + + rs_sta->last_txrate_idx = index; + if (sband->band == IEEE80211_BAND_5GHZ) + info->control.rates[0].idx = rs_sta->last_txrate_idx - + IWL_FIRST_OFDM_RATE; + else + info->control.rates[0].idx = rs_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "leave: %d\n", index); +} + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl3945_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int j; + ssize_t ret; + struct iwl3945_rs_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n" + "rate=0x%X flush time %d\n", + lq_sta->tx_packets, + lq_sta->last_txrate_idx, + lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); + for (j = 0; j < IWL_RATE_COUNT_3945; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->win[j].counter, + lq_sta->win[j].success_counter, + lq_sta->win[j].success_ratio); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl3945_sta_dbgfs_stats_table_read, + .open = iwl3945_open_file_generic, + .llseek = default_llseek, +}; + +static void iwl3945_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + +} + +static void iwl3945_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl3945_rs_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it.
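+ * (The real per-station setup happens in iwl3945_rs_rate_init(), which the + * driver calls once the station has actually been added.)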
+ */ +static void iwl3945_rs_rate_init_stub(void *priv_r, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ +} + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = iwl3945_rs_tx_status, + .get_rate = iwl3945_rs_get_rate, + .rate_init = iwl3945_rs_rate_init_stub, + .alloc = iwl3945_rs_alloc, + .free = iwl3945_rs_free, + .alloc_sta = iwl3945_rs_alloc_sta, + .free_sta = iwl3945_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl3945_add_debugfs, + .remove_sta_debugfs = iwl3945_remove_debugfs, +#endif + +}; +void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct iwl3945_rs_sta *rs_sta; + struct ieee80211_sta *sta; + struct iwl3945_sta_priv *psta; + + IWL_DEBUG_RATE(priv, "enter\n"); + + rcu_read_lock(); + + sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif, + priv->stations[sta_id].sta.sta.addr); + if (!sta) { + IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n"); + rcu_read_unlock(); + return; + } + + psta = (void *) sta->drv_priv; + rs_sta = &psta->rs_sta; + + spin_lock_irqsave(&rs_sta->lock, flags); + + rs_sta->tgg = 0; + switch (priv->band) { + case IEEE80211_BAND_2GHZ: + /* TODO: this always does G, not a regression */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_TGG_PROTECT_MSK) { + rs_sta->tgg = 1; + rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; + } else + rs_sta->expected_tpt = iwl3945_expected_tpt_g; + break; + + case IEEE80211_BAND_5GHZ: + rs_sta->expected_tpt = iwl3945_expected_tpt_a; + break; + case IEEE80211_NUM_BANDS: + BUG(); + break; + } + + spin_unlock_irqrestore(&rs_sta->lock, flags); + + rssi = priv->_3945.last_rx_rssi; + if (rssi == 0) + rssi = IWL_MIN_RSSI_VAL; + + IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi); + + rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band); + + IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: " + "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, + iwl3945_rates[rs_sta->start_rate].plcp); + rcu_read_unlock(); +} + +int iwl3945_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rs_ops); +} + +void iwl3945_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_ops); +} diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c new file mode 100644 index 000000000000..8359594839e2 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945.c @@ -0,0 +1,2744 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> +#include <net/mac80211.h> + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-led.h" +#include "iwl-3945-led.h" +#include "iwl-3945-debugfs.h" + +#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX, \ + IWL_RATE_##r##M_INDEX_TABLE, \ + IWL_RATE_##ip##M_INDEX_TABLE } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = { + IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ +}; + +static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) +{ + u8 rate = iwl3945_rates[rate_index].prev_ieee; + + if (rate == IWL_RATE_INVALID) + rate = rate_index; + return rate; +} + +/* 1 = enable the iwl3945_disable_events() function */ +#define IWL_EVT_DISABLE (0) +#define IWL_EVT_DISABLE_SIZE (1532/32) + +/** + * iwl3945_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IWL_EVT_DISABLE to 1. 
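+ * For example, setting bit 5 of evt_disable[0] below would suppress logging + * of uCode event id 5 once the array is written out to SRAM.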
*/ +void iwl3945_disable_events(struct iwl_priv *priv) +{ + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); + return; + } + + disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32))); + + if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { + IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", + disable_ptr); + for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) + iwl_legacy_write_targ_mem(priv, + disable_ptr + (i * sizeof(u32)), + evt_disable[i]); + + } else { + IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n"); + IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n"); + IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n", + disable_ptr, array_size); + } + +} + +static int iwl3945_hwrate_to_plcp_idx(u8 plcp) +{ + int idx; + + for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++) + if (iwl3945_rates[idx].plcp == plcp) + return idx; + return -1; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x + +static const char *iwl3945_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_3945_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + 
TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} +#else +static inline const char *iwl3945_get_tx_fail_reason(u32 status) +{ + return ""; +} +#endif + +/* + * get ieee prev rate from rate scale table. + * for A and B mode we need to override the prev + * value + */ +int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate) +{ + int next_rate = iwl3945_get_prev_ieee_rate(rate); + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + if (rate == IWL_RATE_12M_INDEX) + next_rate = IWL_RATE_9M_INDEX; + else if (rate == IWL_RATE_6M_INDEX) + next_rate = IWL_RATE_6M_INDEX; + break; + case IEEE80211_BAND_2GHZ: + if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + if (rate == IWL_RATE_11M_INDEX) + next_rate = IWL_RATE_5M_INDEX; + } + break; + + default: + break; + } + + return next_rate; +} + + +/** + * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As a result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. + */ +static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, + int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + + BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); + + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); + tx_info->skb = NULL; + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + + if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL39_CMD_QUEUE_NUM) && + priv->mac80211_registered) + iwl_legacy_wake_queue(priv, txq); +} + +/** + * iwl3945_rx_reply_tx - Handle Tx response + */ +static void iwl3945_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_tx_info *info; + struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + int rate_idx; + int fail; + + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + ieee80211_tx_info_clear_status(info); + + /* Fill the MRR chain with some info about on-chip retransmissions */ + rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate); + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + + fail = tx_resp->failure_frame; + + info->status.rates[0].idx = rate_idx; + info->status.rates[0].count = fail + 1; /* add final attempt */ + + /* tx_status->rts_retry_count = tx_resp->failure_rts; */ + info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
+ IEEE80211_TX_STAT_ACK : 0; + + IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", + txq_id, iwl3945_get_tx_fail_reason(status), status, + tx_resp->rate, tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index); + iwl3945_tx_queue_reclaim(priv, txq_id, index); + + if (status & TX_ABORT_REQUIRED_MSK) + IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + *****************************************************************************/ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +static void iwl3945_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + + prev_stats = (__le32 *)&priv->_3945.statistics; + accum_stats = (u32 *)&priv->_3945.accum_statistics; + delta = (u32 *)&priv->_3945.delta_statistics; + max_delta = (u32 *)&priv->_3945.max_delta; + + for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics); + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + priv->_3945.accum_statistics.general.temperature = + priv->_3945.statistics.general.temperature; + priv->_3945.accum_statistics.general.ttl_timestamp = + priv->_3945.statistics.general.ttl_timestamp; +} +#endif + +void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl3945_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); +#endif + iwl_legacy_recover_from_statistics(priv, pkt); + + memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); +} + +void iwl3945_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + __le32 *flag = (__le32 *)&pkt->u.raw; + + if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + memset(&priv->_3945.accum_statistics, 0, + sizeof(struct iwl3945_notif_statistics)); + memset(&priv->_3945.delta_statistics, 0, + sizeof(struct iwl3945_notif_statistics)); + memset(&priv->_3945.max_delta, 0, + sizeof(struct iwl3945_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl3945_hw_rx_statistics(priv, rxb); +} + + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ + +/* This is necessary only for a number of statistics, see the caller. */ +static int iwl3945_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case NL80211_IFTYPE_ADHOC: /* Header: Dest. 
| Source | BSSID */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr3, priv->bssid); + case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ + /* packets to our IBSS update information */ + return !compare_ether_addr(header->addr2, priv->bssid); + default: + return 1; + } +} + +static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u16 len = le16_to_cpu(rx_hdr->len); + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We received data from the HW, so stop the watchdog */ + if (unlikely(len + IWL39_RX_FRAME_SIZE > + PAGE_SIZE << priv->hw_params.rx_page_order)) { + IWL_DEBUG_DROP(priv, "Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + if (!iwl3945_mod_params.sw_crypto) + iwl_legacy_set_decrypted_flag(priv, + (struct ieee80211_hdr *)rxb_addr(rxb), + le32_to_cpu(rx_end->status), stats); + + skb_add_rx_frag(skb, 0, rxb->page, + (void *)rx_hdr->payload - (void *)pkt, len); + + iwl_legacy_update_stats(priv, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) + +static void iwl3945_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); + u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff); + u8 network_packet; + + rx_status.flag = 0; + rx_status.mactime = le64_to_cpu(rx_end->timestamp); + rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), + rx_status.band); + + rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); + if (rx_status.band == IEEE80211_BAND_5GHZ) + rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; + + rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & + RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if appropriate */ + if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + if ((unlikely(rx_stats->phy_count > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + rx_stats->phy_count); + return; + } + + if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) + || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status); + return; + } + + + + /* Convert 3945's rssi indicator to dBm */ + rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; + + IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n", + rx_status.signal, rx_stats_sig_avg, + rx_stats_noise_diff); + + header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + + network_packet = iwl3945_is_network_packet(priv, header); + + IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n", + network_packet ? '*' : ' ', + le16_to_cpu(rx_hdr->channel), + rx_status.signal, rx_status.signal, + rx_status.rate_idx); + + iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), + header); + + if (network_packet) { + priv->_3945.last_beacon_time = + le32_to_cpu(rx_end->beacon_timestamp); + priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp); + priv->_3945.last_rx_rssi = rx_status.signal; + } + + iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); +} + +int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad) +{ + int count; + struct iwl_queue *q; + struct iwl3945_tfd *tfd, *tfd_tmp; + + q = &txq->q; + tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + + if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->tbs[count].addr = cpu_to_le32(addr); + tfd->tbs[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | + TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr] + * + * Does NOT advance any indexes + */ +void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds; + int index = txq->q.read_ptr; + struct iwl3945_tfd *tfd = &tfd_tmp[index]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IWL_ERR(priv, "Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (counter) + pci_unmap_single(dev, + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), + PCI_DMA_TODEVICE); + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) + pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), 
PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +/** + * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id) +{ + u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value; + u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945); + u16 rate_mask; + int rate; + u8 rts_retry_limit; + u8 data_retry_limit; + __le32 tx_flags; + __le16 fc = hdr->frame_control; + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + + rate = iwl3945_rates[rate_index].plcp; + tx_flags = tx_cmd->tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context */ + rate_mask = IWL_RATES_MASK_3945; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + if (tx_id >= IWL39_CMD_QUEUE_NUM) + rts_retry_limit = 3; + else + rts_retry_limit = 7; + + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + tx_cmd->rate = rate; + tx_cmd->tx_flags = tx_flags; + + /* OFDM */ + tx_cmd->supp_rates[0] = + ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; + + /* CCK */ + tx_cmd->supp_rates[1] = (rate_mask & 0xF); + + IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, + tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags), + tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); +} + +static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) +{ + unsigned long flags_spin; + struct iwl_station_entry *station; + + if (sta_id == IWL_INVALID_STATION) + return IWL_INVALID_STATION; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + station = &priv->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", + sta_id, tx_rate); + return sta_id; +} + +static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } + */ + + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ +} + +static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), + rxq->rb_stts_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0); + 
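+ /* Enable the Rx DMA channel and configure ring size, 128-byte max frag + * size, status write-back and the interrupt-to-host threshold. */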
iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), + FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | + (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | + FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | + (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | + FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL); + + return 0; +} + +static int iwl3945_tx_reset(struct iwl_priv *priv) +{ + + /* bypass mode */ + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); + + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); + iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); + iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); + + iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE, + priv->_3945.shared_phys); + + iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG, + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + + return 0; +} + +/** + * iwl3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc; + int txq_id, slots_num; + + iwl3945_hw_txq_ctx_free(priv); + + /* allocate tx queue structure */ + rc = iwl_legacy_alloc_txq_mem(priv); + if (rc) + return rc; + + /* Tx CMD queue */ + rc = iwl3945_tx_reset(priv); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id], + slots_num, txq_id); + if (rc) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl3945_hw_txq_ctx_free(priv); + return rc; +} + + +/* + * Start up 3945's basic functionality after it has been reset + * (e.g. 
after platform boot, or shutdown via iwl_legacy_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +static int iwl3945_apm_init(struct iwl_priv *priv) +{ + int ret = iwl_legacy_apm_init(priv); + + /* Clear APMG (NIC's internal power management) interrupts */ + iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); + + /* Reset radio chip */ + iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + return ret; +} + +static void iwl3945_nic_config(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + unsigned long flags; + u8 rev_id = 0; + + spin_lock_irqsave(&priv->lock, flags); + + /* Determine HW type */ + pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + IWL_DEBUG_INFO(priv, "RTP type\n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); + } else { + IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); + } + + if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { + IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); + + if ((eeprom->board_revision & 0xF0) == 0xD0) { + IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", + eeprom->board_revision); + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (eeprom->almgor_m_version <= 1) { + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", + eeprom->almgor_m_version); + } else { + IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", + eeprom->almgor_m_version); + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n"); + + if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n"); +} + +int iwl3945_hw_nic_init(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl3945_set_pwr_vmain(priv); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_legacy_rx_queue_alloc(priv); + if (rc) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl3945_rx_queue_reset(priv, rxq); + + iwl3945_rx_replenish(priv); + + iwl3945_rx_init(priv, rxq); + + + /* Look at using this instead: + rxq->need_update = 1; + 
iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + */ + + iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); + + rc = iwl3945_txq_ctx_reset(priv); + if (rc) + return rc; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl3945_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; + txq_id++) + if (txq_id == IWL39_CMD_QUEUE_NUM) + iwl_legacy_cmd_queue_free(priv); + else + iwl_legacy_tx_queue_free(priv, txq_id); + + /* free tx queue structure */ + iwl_legacy_txq_mem(priv); +} + +void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + int txq_id; + + /* stop SCD */ + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0); + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0); + + /* reset TFD queues */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); + iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, + FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), + 1000); + } + + iwl3945_hw_txq_ctx_free(priv); +} + +/** + * iwl3945_hw_reg_adjust_power_by_temp + * return index delta into power gain settings table +*/ +static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int iwl3945_hw_reg_temp_out_of_range(int temperature) +{ + return ((temperature < -260) || (temperature > 25)) ? 1 : 0; +} + +int iwl3945_hw_get_temperature(struct iwl_priv *priv) +{ + return iwl_read32(priv, CSR_UCODE_DRV_GP2); +} + +/** + * iwl3945_hw_reg_txpower_get_temperature + * get the current temperature by reading from NIC +*/ +static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int temperature; + + temperature = iwl3945_hw_get_temperature(priv); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (iwl3945_hw_reg_temp_out_of_range(temperature)) { + IWL_ERR(priv, "Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (priv->last_temperature > 100) + temperature = eeprom->groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = priv->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IWL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * iwl3945_is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). 
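+ * Returns 1 when the absolute temperature change is at least + * IWL_TEMPERATURE_LIMIT_TIMER, 0 otherwise.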
*/ +static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Same temp,\n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { + IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... + * update the "last temperature" value */ + priv->last_temperature = priv->temperature; + return 1; +} + +#define IWL_MAX_GAIN_ENTRIES 78 +#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ +static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} }, /* 2.4 GHz, lowest power */ + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} } /* 5.x GHz, lowest power */ +}; + +static inline u8 iwl3945_hw_reg_fix_power_index(int index) +{ + if (index < 0) + return 0; + if 
(index >= IWL_MAX_GAIN_ENTRIES) + return IWL_MAX_GAIN_ENTRIES - 1; + return (u8) index; +} + +/* Kick off thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. + */ +static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, + s32 rate_index, const s8 *clip_pwrs, + struct iwl_channel_info *ch_info, + int band_index) +{ + struct iwl3945_scan_power_info *scan_power_info; + s8 power; + u8 power_index; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); + + power = min(power, priv->tx_power_user_lmt); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *index* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *index*. */ + power_index = ch_info->power_info[rate_index].power_table_index + - (power - ch_info->power_info + [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2; + + /* store reference index that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference index into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_index = iwl3945_hw_reg_fix_power_index(power_index); + + scan_power_info->power_table_index = power_index; + scan_power_info->tpc.tx_gain = + power_gain_table[band_index][power_index].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_index][power_index].dsp_atten; +} + +/** + * iwl3945_send_tx_power - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +static int iwl3945_send_tx_power(struct iwl_priv *priv) +{ + int rate_idx, i; + const struct iwl_channel_info *ch_info = NULL; + struct iwl3945_txpowertable_cmd txpower = { + .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel, + }; + u16 chan; + + if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); + + txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 
0 : 1; + ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan); + if (!ch_info) { + IWL_ERR(priv, + "Failed to get channel info for channel %d [%d]\n", + chan, priv->band); + return -EINVAL; + } + + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " + "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + /* Fill OFDM rate */ + for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0; + rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) { + + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + /* Fill CCK rates */ + for (rate_idx = IWL_FIRST_CCK_RATE; + rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) { + txpower.power[i].tpc = ch_info->power_info[i].tpc; + txpower.power[i].rate = iwl3945_rates[rate_idx].plcp; + + IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[i].tpc.tx_gain, + txpower.power[i].tpc.dsp_atten, + txpower.power[i].rate); + } + + return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + sizeof(struct iwl3945_txpowertable_cmd), + &txpower); + +} + +/** + * iwl3945_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_index ch_info fields for + * one channel. + * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv, + struct iwl_channel_info *ch_info) +{ + struct iwl3945_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; + i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power index */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_index -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. 
+ requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' iwl3945_channel_power_info structures */ + for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) { + power_info->requested_power = power; + power_info->base_power_index = + ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]. + base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_index. + * + * If RxOn is "associated", this sends the new Txpower to NIC! + */ +static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_index; + u8 scan_tbl_index; + u8 i; + int ref_temp; + int temperature = priv->temperature; + + if (priv->disable_tx_power_cal || + test_bit(STATUS_SCANNING, &priv->status)) { + /* do not perform tx power calibration */ + return 0; + } + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = iwl_legacy_is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16)eeprom->groups[ch_info->group_index]. + temperature; + + /* get power index adjustment based on current and factory + * temps */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945; + rate_index++) { + int power_idx = + ch_info->power_info[rate_index].base_power_index; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within table range */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + ch_info->power_info[rate_index]. + power_table_index = (u8) power_idx; + ch_info->power_info[rate_index].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
+ IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return priv->cfg->ops->lib->send_tx_power(priv); +} + +int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + struct iwl_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (priv->tx_power_user_lmt == power) { + IWL_DEBUG_POWER(priv, "Requested Tx power same as current " + "limit: %ddBm.\n", power); + return 0; + } + + IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power); + priv->tx_power_user_lmt = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = iwl_legacy_is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + iwl3945_hw_reg_set_new_power(priv, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. */ + iwl3945_is_temp_calib_needed(priv); + iwl3945_hw_reg_comp_txpower_temp(priv); + + return 0; +} + +static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int rc = 0; + struct iwl_rx_packet *pkt; + struct iwl3945_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + + rc = iwl_legacy_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + + return rc; +} + +/** + * iwl3945_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is committed to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. 
+ */ +int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active; + struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging; + int rc = 0; + bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + if (!iwl_legacy_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + staging_rxon->flags &= + ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + staging_rxon->flags |= iwl3945_get_antenna_flags(priv); + + rc = iwl_legacy_check_rxon_cmd(priv, ctx); + if (rc) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl3945_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_legacy_full_rxon_required(priv, + &priv->contexts[IWL_RXON_CTX_BSS])) { + rc = iwl_legacy_send_rxon_assoc(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + if (rc) { + IWL_ERR(priv, "Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. + */ + active_rxon->reserved4 = 0; + active_rxon->reserved5 = 0; + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + &priv->contexts[IWL_RXON_CTX_BSS].active); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + iwl_legacy_clear_ucode_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(staging_rxon->channel), + staging_rxon->bssid_addr); + + /* + * reserved4 and 5 could have been filled by the iwlcore code. + * Let's clear them before pushing to the 3945. 
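[Editor's aside] iwl3945_commit_rxon() is long, so the ordering it enforces is easy to lose track of; the remainder of the function follows below. This is a condensed, standalone rendition of that ordering only: the stand-in helper and the printed strings are the editor's, not the patch's API, and all error handling is omitted.

#include <stdbool.h>
#include <stdio.h>

static void step(const char *what) { printf("-> %s\n", what); }

static void commit_rxon_sketch(bool full_rxon_required,
			       bool currently_associated, bool want_assoc)
{
	if (!full_rxon_required) {
		/* only flags/filter_flags/basic rates changed */
		step("send REPLY_RXON_ASSOC, copy staging -> active");
		return;
	}

	if (currently_associated && want_assoc) {
		/* force the hardware re-tune by dropping ASSOC first */
		step("send REPLY_RXON with RXON_FILTER_ASSOC_MSK cleared");
		step("clear + restore uCode station table");
	}

	step("send full REPLY_RXON from staging, copy staging -> active");

	if (!want_assoc)
		step("clear + restore uCode station table");

	/* a tune invalidates Tx power and rate fallback programming */
	step("re-send Tx power (REPLY_TX_PWR_TABLE_CMD)");
	step("re-init rate fallback table (REPLY_RATE_SCALE)");
}

int main(void)
{
	/* e.g. associating while not yet associated */
	commit_rxon_sketch(true, false, true);
	return 0;
}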
+ */ + staging_rxon->reserved4 = 0; + staging_rxon->reserved5 = 0; + + iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); + + /* Apply the new configuration */ + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl3945_rxon_cmd), + staging_rxon); + if (rc) { + IWL_ERR(priv, "Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); + + if (!new_assoc) { + iwl_legacy_clear_ucode_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + } + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); + if (rc) { + IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Init the hardware's rate fallback order based on the band */ + rc = iwl3945_init_hw_rate_table(priv); + if (rc) { + IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +/** + * iwl3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) +{ + /* This will kick in the "brute force" + * iwl3945_hw_reg_comp_txpower_temp() below */ + if (!iwl3945_is_temp_calib_needed(priv)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. + * This is based *only* on current temperature, + * ignoring any previous power measurements */ + iwl3945_hw_reg_comp_txpower_temp(priv); + + reschedule: + queue_delayed_work(priv->workqueue, + &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ); +} + +static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + _3945.thermal_periodic.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_reg_txpower_periodic(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4) + * for the channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. + */ +static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv, + const struct iwl_channel_info *ch_info) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0]; + u8 group; + u16 group_index = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group index for the channel ... don't use index 1(?) 
*/ + if (iwl_legacy_is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_index = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_index = 4; + } else + group_index = 0; /* 2.4 GHz, group 0 */ + + IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel, + group_index); + return group_index; +} + +/** + * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index + * + * Interpolate to get nominal (i.e. at factory calibration temperature) index + * into radio/DSP gain settings table for requested power. + */ +static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv, + s8 requested_power, + s32 setting_index, s32 *new_index) +{ + const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + s32 index0, index1; + s32 power = 2 * requested_power; + s32 i; + const struct iwl3945_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &eeprom->groups[setting_index]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_index = samples[i].gain_index; + return 0; + } + } + + if (power > samples[1].power) { + index0 = 0; + index1 = 1; + } else if (power > samples[2].power) { + index0 = 1; + index1 = 2; + } else if (power > samples[3].power) { + index0 = 2; + index1 = 3; + } else { + index0 = 3; + index1 = 4; + } + + denominator = (s32) samples[index1].power - (s32) samples[index0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[index0].gain_index * (1 << 19); + gains1 = (s32) samples[index1].gain_index * (1 << 19); + res = gains0 + (gains1 - gains0) * + ((s32) power - (s32) samples[index0].power) / denominator + + (1 << 18); + *new_index = res >> 19; + return 0; +} + +static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv) +{ + u32 i; + s32 rate_index; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + const struct iwl3945_eeprom_txpower_group *group; + + IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &eeprom->groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IWL_WARN(priv, "Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). 
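[Editor's aside] The interpolation helper above works entirely in integers: the requested power is doubled into the half-dB units of the factory samples, and the gain index is interpolated in 2^19 fixed point, with a 2^18 term added for rounding before the final shift. The standalone sketch below replays that arithmetic on invented calibration samples; the sample values, the names and the 12 dBm request are the editor's, not the patch's, and the bracketing search is hard-coded for brevity.

#include <stdio.h>

struct sample { int power; int gain_index; };	/* power in half-dBm */

static int interpolate_gain_index(const struct sample *s, int idx0, int idx1,
				  int power)
{
	/* scale by 2^19 so the integer division keeps fractional precision,
	 * then round by adding 2^18 before shifting back down */
	int gains0 = s[idx0].gain_index * (1 << 19);
	int gains1 = s[idx1].gain_index * (1 << 19);
	int denom  = s[idx1].power - s[idx0].power;
	int res    = gains0 +
		     (gains1 - gains0) * (power - s[idx0].power) / denom +
		     (1 << 18);
	return res >> 19;
}

int main(void)
{
	/* invented samples: higher power -> lower (hotter) gain index */
	struct sample s[5] = { {30, 20}, {26, 28}, {22, 36}, {18, 44}, {14, 52} };
	int requested_half_dbm = 2 * 12;	/* request 12 dBm -> 24 half-dBm */

	/* 24 half-dBm falls between samples[1] (26) and samples[2] (22) */
	printf("gain index = %d\n",
	       interpolate_gain_index(s, 1, 2, requested_half_dbm));  /* -> 32 */
	return 0;
}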
+ */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_index = 0; + rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) { + switch (rate_index) { + case IWL_RATE_36M_INDEX_TABLE: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case IWL_RATE_48M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case IWL_RATE_54M_INDEX_TABLE: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up priv->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. + * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl3945_channel_power_info *pwr_info; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + int delta_index; + u8 rate_index; + u8 scan_tbl_index; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_index, base_pwr_index, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = iwl3945_hw_reg_txpower_get_temperature(priv); + priv->last_temperature = temperature; + + iwl3945_hw_reg_init_channel_groups(priv); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; + i++, ch_info++) { + a_band = iwl_legacy_is_channel_a_band(ch_info); + if (!iwl_legacy_is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") index */ + ch_info->group_index = + iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers; + + /* calculate power index *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, + eeprom->groups[ch_info->group_index]. + temperature); + + IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n", + ch_info->channel, delta_index, temperature + + IWL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_index = 0; rate_index < IWL_OFDM_RATES; + rate_index++) { + s32 uninitialized_var(power_idx); + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_index]); + + pwr_info = &ch_info->power_info[rate_index]; + + /* get base (i.e. 
at factory-measured temperature) + * power table index for this rate's power */ + rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr, + ch_info->group_index, + &power_idx); + if (rc) { + IWL_ERR(priv, "Invalid power index\n"); + return rc; + } + pwr_info->base_power_index = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within range of gain table */ + power_idx = iwl3945_hw_reg_fix_power_index(power_idx); + + /* fill 1 OFDM rate's iwl3945_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_index = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ + pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE]; + power = pwr_info->requested_power + + IWL_CCK_FROM_OFDM_POWER_DIFF; + pwr_index = pwr_info->power_table_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + base_pwr_index = pwr_info->base_power_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + + /* stay within table range */ + pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index); + gain = power_gain_table[a_band][pwr_index].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; + + /* fill each CCK rate's iwl3945_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_index = 0; + rate_index < IWL_CCK_RATES; rate_index++) { + pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES]; + pwr_info->requested_power = power; + pwr_info->power_table_index = pwr_index; + pwr_info->base_power_index = base_pwr_index; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? + IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE; + iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, ch_info, a_band); + } + } + + return 0; +} + +int iwl3945_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + + iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); + rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, + FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + if (rc < 0) + IWL_ERR(priv, "Can't stop Rx DMA.\n"); + + return 0; +} + +int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + struct iwl3945_shared *shared_data = priv->_3945.shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); + + iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); + iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); + + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + + /* fake read to flush all prev. 
writes */ + iwl_read32(priv, FH39_TSSR_CBB_BASE); + + return 0; +} + +/* + * HCMD utils + */ +static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return sizeof(struct iwl3945_rxon_cmd); + case POWER_TABLE_CMD: + return sizeof(struct iwl3945_powertable_cmd); + default: + return len; + } +} + + +static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) +{ + struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cpu_to_le16(0); + addsta->rate_n_flags = cmd->rate_n_flags; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + + return (u16)sizeof(struct iwl3945_addsta_cmd); +} + +static int iwl3945_add_bssid_station(struct iwl_priv *priv, + const u8 *addr, u8 *sta_id_r) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int ret; + u8 sta_id; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} +static int iwl3945_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + int ret; + + if (add) { + ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + if (ret) + return ret; + + iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id, + (priv->band == IEEE80211_BAND_5GHZ) ? 
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP); + iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id); + + return 0; + } + + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +/** + * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int iwl3945_init_hw_rate_table(struct iwl_priv *priv) +{ + int rc, i, index, prev_index; + struct iwl3945_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct iwl3945_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) { + index = iwl3945_rates[i].table_rs_index; + + table[index].rate_n_flags = + iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0); + table[index].try_cnt = priv->retry_rate; + prev_index = iwl3945_get_prev_ieee_rate(i); + table[index].next_rate_index = + iwl3945_rates[prev_index].table_rs_index; + } + + switch (priv->band) { + case IEEE80211_BAND_5GHZ: + IWL_DEBUG_RATE(priv, "Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = IWL_RATE_1M_INDEX_TABLE; + i <= IWL_RATE_11M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + + /* Don't fall back to CCK rates */ + table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = + IWL_RATE_9M_INDEX_TABLE; + + /* Don't drop out of OFDM rates */ + table[IWL_RATE_6M_INDEX_TABLE].next_rate_index = + iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; + break; + + case IEEE80211_BAND_2GHZ: + IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + + if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + + index = IWL_FIRST_CCK_RATE; + for (i = IWL_RATE_6M_INDEX_TABLE; + i <= IWL_RATE_54M_INDEX_TABLE; i++) + table[i].next_rate_index = + iwl3945_rates[index].table_rs_index; + + index = IWL_RATE_11M_INDEX_TABLE; + /* CCK shouldn't fall back to OFDM... 
*/ + table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE; + } + break; + + default: + WARN_ON(1); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); +} + +/* Called when initializing driver */ +int iwl3945_hw_set_hw_params(struct iwl_priv *priv) +{ + memset((void *)&priv->hw_params, 0, + sizeof(struct iwl_hw_params)); + + priv->_3945.shared_virt = + dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + &priv->_3945.shared_phys, GFP_KERNEL); + if (!priv->_3945.shared_virt) { + IWL_ERR(priv, "failed to allocate pci memory\n"); + return -ENOMEM; + } + + /* Assign number of Usable TX queues */ + priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; + + priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K); + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + priv->hw_params.max_stations = IWL3945_STATION_COUNT; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID; + + priv->sta_key_max_num = STA_KEY_MAX_NUM; + + priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; + priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; + priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS; + + return 0; +} + +unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate) +{ + struct iwl3945_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl3945_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/ + tx_beacon_cmd->tx.supp_rates[0] = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + + tx_beacon_cmd->tx.supp_rates[1] = + (IWL_CCK_BASIC_RATES_MASK & 0xF); + + return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size; +} + +void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx; + priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; +} + +void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_DELAYED_WORK(&priv->_3945.thermal_periodic, + iwl3945_bg_reg_txpower_periodic); +} + +void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_delayed_work(&priv->_3945.thermal_periodic); +} + +/* check contents of special bootstrap uCode SRAM */ +static int iwl3945_verify_bsm(struct iwl_priv *priv) + { + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < 
BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_legacy_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +/* + * Clear the OWNER_MSK, to establish driver (instead of uCode running on + * embedded controller) as EEPROM reader; each read is a series of pulses + * to/from the EEPROM chip, not a single event, so even reads could conflict + * if they weren't arbitrated by some ownership mechanism. Here, the driver + * simply claims ownership, which should be safe when this function is called + * (i.e. before loading uCode!). + */ +static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + + +static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv) +{ + return; +} + + /** + * iwl3945_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl3945_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL39_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 31:0 for 3945. 
+ * NOTE: iwl3945_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. */ + pinst = priv->ucode_init.p_addr; + pdata = priv->ucode_init_data.p_addr; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_legacy_write_prph(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl3945_verify_bsm(priv); + if (rc) + return rc; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, + IWL39_RTC_INST_LOWER_BOUND); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. 
when powering back up after power-save shutdown) */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + return 0; +} + +static struct iwl_hcmd_ops iwl3945_hcmd = { + .rxon_assoc = iwl3945_send_rxon_assoc, + .commit_rxon = iwl3945_commit_rxon, +}; + +static struct iwl_lib_ops iwl3945_lib = { + .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl3945_hw_txq_free_tfd, + .txq_init = iwl3945_hw_tx_queue_init, + .load_ucode = iwl3945_load_bsm, + .dump_nic_event_log = iwl3945_dump_nic_event_log, + .dump_nic_error_log = iwl3945_dump_nic_error_log, + .apm_ops = { + .init = iwl3945_apm_init, + .config = iwl3945_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_REGULATORY_BAND_NO_HT40, + EEPROM_REGULATORY_BAND_NO_HT40, + }, + .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, + .release_semaphore = iwl3945_eeprom_release_semaphore, + }, + .send_tx_power = iwl3945_send_tx_power, + .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, + + .debugfs_ops = { + .rx_stats_read = iwl3945_ucode_rx_stats_read, + .tx_stats_read = iwl3945_ucode_tx_stats_read, + .general_stats_read = iwl3945_ucode_general_stats_read, + }, +}; + +static const struct iwl_legacy_ops iwl3945_legacy_ops = { + .post_associate = iwl3945_post_associate, + .config_ap = iwl3945_config_ap, + .manage_ibss_station = iwl3945_manage_ibss_station, +}; + +static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { + .get_hcmd_size = iwl3945_get_hcmd_size, + .build_addsta_hcmd = iwl3945_build_addsta_hcmd, + .request_scan = iwl3945_request_scan, + .post_scan = iwl3945_post_scan, +}; + +static const struct iwl_ops iwl3945_ops = { + .lib = &iwl3945_lib, + .hcmd = &iwl3945_hcmd, + .utils = &iwl3945_hcmd_utils, + .led = &iwl3945_led_ops, + .legacy = &iwl3945_legacy_ops, + .ieee80211_ops = &iwl3945_hw_ops, +}; + +static struct iwl_base_params iwl3945_base_params = { + .eeprom_size = IWL3945_EEPROM_IMG_SIZE, + .num_of_queues = IWL39_NUM_QUEUES, + .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, + .set_l0s = false, + .use_bsm = true, + .led_compensation = 64, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .max_event_log_size = 512, +}; + +static struct iwl_cfg iwl3945_bg_cfg = { + .name = "3945BG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .mod_params = &iwl3945_mod_params, + .base_params = &iwl3945_base_params, + .led_mode = IWL_LED_BLINK, +}; + +static struct iwl_cfg iwl3945_abg_cfg = { + .name = "3945ABG", + .fw_name_pre = IWL3945_FW_PRE, + .ucode_api_max = IWL3945_UCODE_API_MAX, + .ucode_api_min = IWL3945_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G, + .eeprom_ver = EEPROM_3945_EEPROM_VERSION, + .ops = &iwl3945_ops, + .mod_params = &iwl3945_mod_params, + .base_params = &iwl3945_base_params, + .led_mode = IWL_LED_BLINK, +}; + +DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { + {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)}, + {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)}, + {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, 
iwl3945_abg_cfg)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids); diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h new file mode 100644 index 000000000000..b118b59b71de --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-3945.h @@ -0,0 +1,308 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-3945.h) for driver implementation definitions. + * Please use iwl-3945-commands.h for uCode API definitions. + * Please use iwl-3945-hw.h for hardware-related definitions. + */ + +#ifndef __iwl_3945_h__ +#define __iwl_3945_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <net/ieee80211_radiotap.h> + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +extern const struct pci_device_id iwl3945_hw_card_ids[]; + +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-3945-hw.h" +#include "iwl-debug.h" +#include "iwl-power.h" +#include "iwl-dev.h" +#include "iwl-led.h" + +/* Highest firmware API version supported */ +#define IWL3945_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL3945_UCODE_API_MIN 1 + +#define IWL3945_FW_PRE "iwlwifi-3945-" +#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode" +#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api) + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. 
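[Editor's aside] The choice of -127 described above is easiest to see with numbers: if an application averages per-frame noise figures, a missing reading recorded as 0 drags the average toward "great signal", while -127 keeps the average pessimistic, negative, and within s8 range. The snippet below is an editor's illustration with invented readings, not driver code.

#include <stdio.h>

int main(void)
{
	signed char with_zero[3]    = { -85, -88, 0 };    /* 0 = "missing" */
	signed char with_default[3] = { -85, -88, -127 }; /* -127 = "missing" */
	int i, sum0 = 0, sum1 = 0;

	for (i = 0; i < 3; i++) {
		sum0 += with_zero[i];
		sum1 += with_default[i];
	}
	/* 0 drags the average up to -57 dBm (looks like a strong signal);
	 * -127 keeps it down at -100 dBm and within s8 range */
	printf("avg with 0: %d, avg with -127: %d\n", sum0 / 3, sum1 / 3);
	return 0;
}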
*/ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern struct iwl_mod_params iwl3945_mod_params; + +struct iwl3945_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl3945_rs_sta { + spinlock_t lock; + struct iwl_priv *priv; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + struct timer_list rate_scale_flush; + struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_stats_table_file; +#endif + + /* used to be in sta_info */ + int last_txrate_idx; +}; + + +/* + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct iwl3945_sta_priv { + struct iwl_station_priv_common common; + struct iwl3945_rs_sta rs_sta; +}; + +enum iwl3945_antenna { + IWL_ANTENNA_DIVERSITY, + IWL_ANTENNA_MAIN, + IWL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl3945_frame { + union { + struct ieee80211_hdr frame; + struct iwl3945_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct iwl3945_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\ + IWL_RX_HDR(x)->payload + \ + le16_to_cpu(IWL_RX_HDR(x)->len))) +#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) + + +/****************************************************************************** + * + * Functions implemented in iwl3945-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +extern int iwl3945_calc_db_from_ratio(int sig_ratio); +extern void 
iwl3945_rx_replenish(void *data); +extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, int left); +extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display); +extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl3945-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. iwl-base.c) + * + * Naming convention -- + * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_) + * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl3945_bg_ <-- Called from work queue context + * iwl3945_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv); +extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv); +extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv); +extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv); +extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv); +extern int iwl3945_hw_nic_init(struct iwl_priv *priv); +extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv); +extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl3945_hw_nic_reset(struct iwl_priv *priv); +extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad); +extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern int iwl3945_hw_get_temperature(struct iwl_priv *priv); +extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl3945_frame *frame, u8 rate); +void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id); +extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv); +extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); +extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl3945_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl3945_disable_events(struct iwl_priv *priv); +extern int iwl4965_get_temperature(const struct iwl_priv *priv); +extern void iwl3945_post_associate(struct iwl_priv *priv); +extern void iwl3945_config_ap(struct iwl_priv *priv); + +extern int iwl3945_commit_rxon(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/** + * iwl3945_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. 
+ */ +extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); + +extern struct ieee80211_ops iwl3945_hw_ops; + +/* + * Forward declare iwl-3945.c functions for iwl3945-base.c + */ +extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); +extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); +extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); +extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); + +extern const struct iwl_channel_info *iwl3945_get_channel_info( + const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); + +extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); + +/* scanning */ +int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); +void iwl3945_post_scan(struct iwl_priv *priv); + +/* rates */ +extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945]; + +/* Requires full declaration of iwl_priv before including */ +#include "iwl-io.h" + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c new file mode 100644 index 000000000000..81d6a25eb04f --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c @@ -0,0 +1,967 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#include <linux/slab.h> +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-4965-calib.h" + +/***************************************************************************** + * INIT calibrations framework + *****************************************************************************/ + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +void iwl4965_calib_free_results(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < IWL_CALIB_MAX; i++) { + kfree(priv->calib_results[i].buf); + priv->calib_results[i].buf = NULL; + priv->calib_results[i].buf_len = 0; + } +} + +/***************************************************************************** + * RUNTIME calibrations framework + *****************************************************************************/ + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. */ +static int iwl4965_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. 
+ * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER) >> 8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER) >> 8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER) >> 8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. */ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if ((false_alarms > max_false_alarms) && + (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "... 
reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB(priv, + "norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); + } else { + IWL_DEBUG_CALIB(priv, + "... but not changing sensitivity\n"); + } + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB(priv, "... increasing margin\n"); + if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) + data->nrg_th_cck -= NRG_MARGIN; + else + data->nrg_th_cck = max_nrg_cck; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. + * Lower value is higher energy, so we use max()! 
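+ * Illustrative clamp, with made-up numbers rather than values from
+ * this patch: if the 10-beacon history gave max_nrg_cck = 96 (the +6
+ * backoff already folded in) while nrg_th_cck had drifted to 90,
+ * max(96, 90) below pulls the threshold to 96, i.e. it never demands
+ * more energy than the weakest desired beacon actually delivered.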
+ */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + /* Auto-correlation CCK algorithm */ + if (false_alarms > min_false_alarms) { + + /* increase auto_corr values to decrease sensitivity + * so the DSP won't be disturbed by the noise + */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + min((u32)ranges->auto_corr_max_cck, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + min((u32)ranges->auto_corr_max_cck_mrc, val); + } else if ((false_alarms < min_false_alarms) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + max((u32)ranges->auto_corr_min_cck, val); + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)ranges->auto_corr_min_cck_mrc, val); + } + + return 0; +} + + +static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)ranges->auto_corr_max_ofdm, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)ranges->auto_corr_max_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)ranges->auto_corr_max_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)ranges->auto_corr_min_ofdm, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)ranges->auto_corr_min_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)ranges->auto_corr_min_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); + } else { + IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + } + return 0; +} + +static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, + struct iwl_sensitivity_data *data, + __le16 *tbl) +{ + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + 
cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + cpu_to_le16(data->barker_corr_th_min); + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = + cpu_to_le16(data->nrg_th_cca); + + IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl4965_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), + .flags = CMD_ASYNC, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + return iwl_legacy_send_cmd(priv, &cmd_out); +} + +void iwl4965_init_sensitivity(struct iwl_priv *priv) +{ + int ret = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + if (priv->disable_sens_cal) + return; + + IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + + if (ranges == NULL) + return; + + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; + data->nrg_th_cck = ranges->nrg_th_cck; + data->nrg_th_ofdm = ranges->nrg_th_ofdm; + data->barker_corr_th_min = ranges->barker_corr_th_min; + data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc; + data->nrg_th_cca 
= ranges->nrg_th_cca; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + ret |= iwl4965_sensitivity_write(priv); + IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); +} + +void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) +{ + u32 rx_enable_time; + u32 fa_cck; + u32 fa_ofdm; + u32 bad_plcp_cck; + u32 bad_plcp_ofdm; + u32 norm_fa_ofdm; + u32 norm_fa_cck; + struct iwl_sensitivity_data *data = NULL; + struct statistics_rx_non_phy *rx_info; + struct statistics_rx_phy *ofdm, *cck; + unsigned long flags; + struct statistics_general_data statis; + + if (priv->disable_sens_cal) + return; + + data = &(priv->sensitivity_data); + + if (!iwl_legacy_is_any_associated(priv)) { + IWL_DEBUG_CALIB(priv, "<< - not associated\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); + ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); + cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); + + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(cck->false_alarm_cnt); + fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(cck->plcp_err); + bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(rx_info->beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(rx_info->beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(rx_info->beacon_energy_c); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. 
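+ * Worked example with made-up counters: if last_fa_cnt_cck held 1000
+ * and the new report says fa_cck = 1250, the delta used below is 250
+ * and the stored snapshot becomes 1250.  If instead fa_cck = 40 (uCode
+ * restarted or the counter wrapped), 40 is used as-is and also becomes
+ * the new snapshot.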
*/ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB(priv, + "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + + iwl4965_sensitivity_write(priv); +} + +static inline u8 iwl4965_find_first_chain(u8 mask) +{ + if (mask & ANT_A) + return CHAIN_A; + if (mask & ANT_B) + return CHAIN_B; + return CHAIN_C; +} + +/** + * Run disconnected antenna algorithm to find out which antennas are + * disconnected. + */ +static void +iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, + struct iwl_chain_noise_data *data) +{ + u32 active_chains = 0; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u8 num_tx_chains; + u8 first_chain; + u16 i = 0; + + average_sig[0] = data->chain_signal_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[1] = data->chain_signal_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[2] = data->chain_signal_c / + priv->cfg->base_params->chain_noise_num_beacons; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. */ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + /* + * The above algorithm sometimes fails when the ucode + * reports 0 for all chains. It's not clear why that + * happens to start with, but it is then causing trouble + * because this can make us enable more chains than the + * hardware really has. + * + * To be safe, simply mask out any chains that we know + * are not on the device. 
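+ * Illustrative masks (hypothetical, not tied to any specific SKU): if
+ * the signal comparison above produced active_chains = 0x7 (A|B|C) but
+ * hw_params.valid_rx_ant = 0x3 (A|B), the AND below trims the result
+ * to 0x3.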
+ */ + active_chains &= priv->hw_params.valid_rx_ant; + + num_tx_chains = 0; + for (i = 0; i < NUM_RX_CHAINS; i++) { + /* loops on all the bits of + * priv->hw_setting.valid_tx_ant */ + u8 ant_msk = (1 << i); + if (!(priv->hw_params.valid_tx_ant & ant_msk)) + continue; + + num_tx_chains++; + if (data->disconn_array[i] == 0) + /* there is a Tx antenna connected */ + break; + if (num_tx_chains == priv->hw_params.tx_chains_num && + data->disconn_array[i]) { + /* + * If all chains are disconnected + * connect the first valid tx chain + */ + first_chain = + iwl4965_find_first_chain(priv->cfg->valid_tx_ant); + data->disconn_array[first_chain] = 0; + active_chains |= BIT(first_chain); + IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \ + W/A - declare %d as connected\n", + first_chain); + break; + } + } + + if (active_chains != priv->hw_params.valid_rx_ant && + active_chains != priv->chain_noise_data.active_chains) + IWL_DEBUG_CALIB(priv, + "Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", + active_chains, priv->hw_params.valid_rx_ant); + + /* Save for use within RXON, TX, SCAN commands, etc. */ + data->active_chains = active_chains; + IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", + active_chains); +} + +static void iwl4965_gain_computation(struct iwl_priv *priv, + u32 *average_noise, + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) +{ + int i, ret; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + data->delta_gain_code[min_average_noise_antenna_i] = 0; + + for (i = default_chain; i < NUM_RX_CHAINS; i++) { + s32 delta_g = 0; + + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + delta_g = average_noise[i] - min_average_noise; + data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); + data->delta_gain_code[i] = + min(data->delta_gain_code[i], + (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + data->delta_gain_code[i] = + (data->delta_gain_code[i] | (1 << 2)); + } else { + data->delta_gain_code[i] = 0; + } + } + IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); + + /* Differential gain gets sent to uCode only once */ + if (!data->radio_write) { + struct iwl_calib_diff_gain_cmd cmd; + data->radio_write = 1; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = data->delta_gain_code[0]; + cmd.diff_gain_b = data->delta_gain_code[1]; + cmd.diff_gain_c = data->delta_gain_code[2]; + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_DEBUG_CALIB(priv, "fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD\n"); + + /* TODO we might want recalculate + * rx_chain in rxon cmd */ + + /* Mark so we run this algo only once! */ + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } +} + + + +/* + * Accumulate 16 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. 
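+ * Rough numeric sketch (hypothetical noise readings): with average
+ * noise of {60, 51, 57} chain B is the quietest reference and keeps
+ * delta gain code 0, while chains A and C see delta_g of 9 and 6,
+ * giving (9 * 10) / 15 = 6 and (6 * 10) / 15 = 4 before the min()
+ * clamp and the (1 << 2) OR that iwl4965_gain_computation() applies.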
+ */ +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) +{ + struct iwl_chain_noise_data *data = NULL; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 rxon_chnum = INITIALIZATION_VALUE; + u16 stat_chnum = INITIALIZATION_VALUE; + u8 rxon_band24; + u8 stat_band24; + unsigned long flags; + struct statistics_rx_non_phy *rx_info; + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (priv->disable_chain_noise_cal) + return; + + data = &(priv->chain_noise_data); + + /* + * Accumulate just the first "chain_noise_num_beacons" after + * the first association, then we're done forever. + */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> + rx.general); + + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); + rxon_chnum = le16_to_cpu(ctx->staging.channel); + + stat_band24 = !!(((struct iwl_notif_statistics *) + stat_resp)->flag & + STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) + stat_resp)->flag) >> 16; + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). 
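+ * Hypothetical example: if RXON says channel 6 on 2.4 GHz but the
+ * statistics flag decodes to channel 36 on 5 GHz (a sample taken while
+ * scanning), the comparison below drops the report instead of letting
+ * it pollute the accumulators.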
*/ + if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { + IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", + rxon_chnum, rxon_band24); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* + * Accumulate beacon statistics values across + * "chain_noise_num_beacons" + */ + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; + + chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; + chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; + chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; + + spin_unlock_irqrestore(&priv->lock, flags); + + data->beacon_count++; + + data->chain_noise_a = (chain_noise_a + data->chain_noise_a); + data->chain_noise_b = (chain_noise_b + data->chain_noise_b); + data->chain_noise_c = (chain_noise_c + data->chain_noise_c); + + data->chain_signal_a = (chain_sig_a + data->chain_signal_a); + data->chain_signal_b = (chain_sig_b + data->chain_signal_b); + data->chain_signal_c = (chain_sig_c + data->chain_signal_c); + + IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", + rxon_chnum, rxon_band24, data->beacon_count); + IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); + + /* If this is the "chain_noise_num_beacons", determine: + * 1) Disconnected antennas (using signal strengths) + * 2) Differential gain (using silence noise) to balance receivers */ + if (data->beacon_count != + priv->cfg->base_params->chain_noise_num_beacons) + return; + + /* Analyze signal for disconnected antenna */ + iwl4965_find_disconn_antenna(priv, average_sig, data); + + /* Analyze noise for rx balance */ + average_noise[0] = data->chain_noise_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[1] = data->chain_noise_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[2] = data->chain_noise_c / + priv->cfg->base_params->chain_noise_num_beacons; + + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { + /* This means that chain i is active and has + * lower noise values so far: */ + min_average_noise = average_noise[i]; + min_average_noise_antenna_i = i; + } + } + + IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); + + IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); + + iwl4965_gain_computation(priv, average_noise, + min_average_noise_antenna_i, min_average_noise, + iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); + + /* Some power changes may have been made during the calibration. 
+ * Update and commit the RXON + */ + if (priv->cfg->ops->lib->update_chain_flags) + priv->cfg->ops->lib->update_chain_flags(priv); + + data->state = IWL_CHAIN_NOISE_DONE; + iwl_legacy_power_update_mode(priv, false); +} + +void iwl4965_reset_run_time_calib(struct iwl_priv *priv) +{ + int i; + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + + /* Ask for statistics now, the uCode will send notification + * periodically after association */ + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h new file mode 100644 index 000000000000..f46c80e6e005 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h @@ -0,0 +1,75 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ +#ifndef __iwl_4965_calib_h__ +#define __iwl_4965_calib_h__ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-commands.h" + +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp); +void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp); +void iwl4965_init_sensitivity(struct iwl_priv *priv); +void iwl4965_reset_run_time_calib(struct iwl_priv *priv); +void iwl4965_calib_free_results(struct iwl_priv *priv); + +#endif /* __iwl_4965_calib_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c new file mode 100644 index 000000000000..1c93665766e4 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c @@ -0,0 +1,774 @@ +/****************************************************************************** +* +* GPL LICENSE SUMMARY +* +* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, +* USA +* +* The full GNU General Public License is included in this distribution +* in the file called LICENSE.GPL. +* +* Contact Information: +* Intel Linux Wireless <ilw@linux.intel.com> +* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +*****************************************************************************/ +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + flag = le32_to_cpu(priv->_4965.statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? 
"enabled" : "disabled"); + + return p; +} + +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->_4965.statistics.rx.ofdm; + cck = &priv->_4965.statistics.rx.cck; + general = &priv->_4965.statistics.rx.general; + ht = &priv->_4965.statistics.rx.ofdm_ht; + accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm; + accum_cck = &priv->_4965.accum_statistics.rx.cck; + accum_general = &priv->_4965.accum_statistics.rx.general; + accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht; + delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm; + delta_cck = &priv->_4965.delta_statistics.rx.cck; + delta_general = &priv->_4965.delta_statistics.rx.general; + delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht; + max_ofdm = &priv->_4965.max_delta.rx.ofdm; + max_cck = &priv->_4965.max_delta.rx.cck; + max_general = &priv->_4965.max_delta.rx.general; + max_ht = &priv->_4965.max_delta.rx.ofdm_ht; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + 
le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, + max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, delta_cck->overrun_err, + max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + 
le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, delta_cck->sfd_timeout, + max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, delta_cck->fina_timeout, + max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, delta_cck->mh_format_err, + max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, delta_general->bogus_cts, + max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, delta_general->bogus_ack, + max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_beacons:", + 
le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + 
max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta_tx = &priv->_4965.delta_statistics.tx; + max_tx = &priv->_4965.max_delta.tx; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, 
bufsz - pos, + fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_mismatch:", + 
le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general = &priv->_4965.statistics.general.common; + dbg = &priv->_4965.statistics.general.common.dbg; + div = &priv->_4965.statistics.general.common.div; + accum_general = &priv->_4965.accum_statistics.general.common; + accum_dbg = &priv->_4965.accum_statistics.general.common.dbg; + accum_div = &priv->_4965.accum_statistics.general.common.div; + delta_general = &priv->_4965.delta_statistics.general.common; + max_general = &priv->_4965.max_delta.general.common; + delta_dbg = &priv->_4965.delta_statistics.general.common.dbg; + max_dbg = &priv->_4965.max_delta.general.common.dbg; + delta_div = &priv->_4965.delta_statistics.general.common.div; + max_div = &priv->_4965.max_delta.general.common.div; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + 
delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h new file mode 100644 index 000000000000..6c8e35361a9e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h @@ -0,0 +1,59 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos); +#else +static ssize_t +iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c new file mode 100644 index 000000000000..cb9baab1ff7d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c @@ -0,0 +1,154 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
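The stubs above let callers invoke the read handlers unconditionally, whether or not CONFIG_IWLWIFI_LEGACY_DEBUGFS is enabled. As a rough sketch (not part of this patch) of how a handler with this signature is normally exposed through debugfs, using an assumed open helper, file name and directory argument:

/* Illustrative only: wiring iwl4965_ucode_rx_stats_read() into debugfs.
 * The open helper, the "ucode_rx_stats" name and the dbgfs_dir argument
 * are assumptions for this example, not code from this patch. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static int example_dbgfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* pass iwl_priv through to read() */
	return 0;
}

static const struct file_operations example_rx_stats_ops = {
	.read	= iwl4965_ucode_rx_stats_read,
	.open	= example_dbgfs_open,
	.owner	= THIS_MODULE,
	.llseek	= default_llseek,
};

static void example_register_rx_stats(struct dentry *dbgfs_dir,
				      struct iwl_priv *priv)
{
	debugfs_create_file("ucode_rx_stats", S_IRUSR, dbgfs_dir, priv,
			    &example_rx_stats_ops);
}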
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-4965.h" +#include "iwl-io.h" + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. + */ +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_IO(priv, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } + + return ret; +} + +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + +} + +int iwl4965_eeprom_check_version(struct iwl_priv *priv) +{ + u16 eeprom_ver; + u16 calib_ver; + + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = iwl_legacy_eeprom_query16(priv, + EEPROM_4965_CALIB_VERSION_OFFSET); + + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) + goto err; + + IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", + eeprom_ver, calib_ver); + + return 0; +err: + IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " + "CALIB=0x%x < 0x%x\n", + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); + return -EINVAL; + +} + +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) +{ + const u8 *addr = iwl_legacy_eeprom_query_addr(priv, + EEPROM_MAC_ADDRESS); + memcpy(mac, addr, ETH_ALEN); +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h new file mode 100644 index 000000000000..08b189c8472d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h @@ -0,0 +1,814 @@ +/****************************************************************************** + * + 
* This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-4965-hw.h) only for hardware-related definitions. + * Use iwl-commands.h for uCode API definitions. + * Use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_4965_hw_h__ +#define __iwl_4965_hw_h__ + +#include "iwl-fh.h" + +/* EEPROM */ +#define IWL4965_EEPROM_IMG_SIZE 1024 + +/* + * uCode queue management definitions ... + * The first queue used for block-ack aggregation is #7 (4965 only). + * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. + */ +#define IWL49_FIRST_AMPDU_QUEUE 7 + +/* Sizes and addresses for instruction and data memory (SRAM) in + * 4965's embedded processor. 
Driver access is via HBUS_TARG_MEM_* regs. */ +#define IWL49_RTC_INST_LOWER_BOUND (0x000000) +#define IWL49_RTC_INST_UPPER_BOUND (0x018000) + +#define IWL49_RTC_DATA_LOWER_BOUND (0x800000) +#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000) + +#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \ + IWL49_RTC_INST_LOWER_BOUND) +#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \ + IWL49_RTC_DATA_LOWER_BOUND) + +#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE +#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE + +/* Size of uCode instruction memory in bootstrap state machine */ +#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE + +static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= IWL49_RTC_DATA_LOWER_BOUND) && + (addr < IWL49_RTC_DATA_UPPER_BOUND); +} + +/********************* START TEMPERATURE *************************************/ + +/** + * 4965 temperature calculation. + * + * The driver must calculate the device temperature before calculating + * a txpower setting (amplifier gain is temperature dependent). The + * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration + * values used for the life of the driver, and one of which (R4) is the + * real-time temperature indicator. + * + * uCode provides all 4 values to the driver via the "initialize alive" + * notification (see struct iwl4965_init_alive_resp). After the runtime uCode + * image loads, uCode updates the R4 value via statistics notifications + * (see STATISTICS_NOTIFICATION), which occur after each received beacon + * when associated, or can be requested via REPLY_STATISTICS_CMD. + * + * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver + * must sign-extend to 32 bits before applying formula below. + * + * Formula: + * + * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 + * + * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is + * an additional correction, which should be centered around 0 degrees + * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for + * centering the 97/100 correction around 0 degrees K. + * + * Add 273 to Kelvin value to find degrees Celsius, for comparing current + * temperature with factory-measured temperatures when calculating txpower + * settings. + */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +/* Limit range of calculated temperature to be between these Kelvin values */ +#define IWL_TX_POWER_TEMPERATURE_MIN (263) +#define IWL_TX_POWER_TEMPERATURE_MAX (410) + +#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ + ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) + +/********************* END TEMPERATURE ***************************************/ + +/********************* START TXPOWER *****************************************/ + +/** + * 4965 txpower calculations rely on information from three sources: + * + * 1) EEPROM + * 2) "initialize" alive notification + * 3) statistics notifications + * + * EEPROM data consists of: + * + * 1) Regulatory information (max txpower and channel usage flags) is provided + * separately for each channel that can possibly supported by 4965. + * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz + * (legacy) channels. + * + * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom + * for locations in EEPROM. + * + * 2) Factory txpower calibration information is provided separately for + * sub-bands of contiguous channels. 
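The temperature formula described above maps onto a few lines of integer arithmetic. A minimal sketch, assuming made-up names and simplified error handling (the real driver also falls back to a previous reading when the result is unusable):

/* Illustrative only: the Kelvin formula from the comment above, using the
 * constants defined there; names and return convention are assumptions. */
static s32 example_4965_temp_kelvin(s32 R1, s32 R2, s32 R3, u32 raw_r4)
{
	/* R4 is delivered as a 23-bit signed value; sign-extend to 32 bits */
	s32 R4 = (s32)(raw_r4 << 9) >> 9;
	s32 kelvin;

	if (R3 == R1)		/* bad calibration data would divide by zero */
		return -1;

	kelvin = (97 * TEMPERATURE_CALIB_A_VAL * (R4 - R2) / (R3 - R1)) / 100 +
		 TEMPERATURE_CALIB_KELVIN_OFFSET;

	/* reject readings outside the sane range defined above */
	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(kelvin))
		return -1;

	return kelvin;
}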
2.4GHz has just one sub-band, + * but 5 GHz has several sub-bands. + * + * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided. + * + * See struct iwl4965_eeprom_calib_info (and the tree of structures + * contained within it) for format, and struct iwl4965_eeprom for + * locations in EEPROM. + * + * "Initialization alive" notification (see struct iwl4965_init_alive_resp) + * consists of: + * + * 1) Temperature calculation parameters. + * + * 2) Power supply voltage measurement. + * + * 3) Tx gain compensation to balance 2 transmitters for MIMO use. + * + * Statistics notifications deliver: + * + * 1) Current values for temperature param R4. + */ + +/** + * To calculate a txpower setting for a given desired target txpower, channel, + * modulation bit rate, and transmitter chain (4965 has 2 transmitters to + * support MIMO and transmit diversity), driver must do the following: + * + * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel. + * Do not exceed regulatory limit; reduce target txpower if necessary. + * + * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), + * 2 transmitters will be used simultaneously; driver must reduce the + * regulatory limit by 3 dB (half-power) for each transmitter, so the + * combined total output of the 2 transmitters is within regulatory limits. + * + * + * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by + * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]); + * reduce target txpower if necessary. + * + * Backoff values below are in 1/2 dB units (equivalent to steps in + * txpower gain tables): + * + * OFDM 6 - 36 MBit: 10 steps (5 dB) + * OFDM 48 MBit: 15 steps (7.5 dB) + * OFDM 54 MBit: 17 steps (8.5 dB) + * OFDM 60 MBit: 20 steps (10 dB) + * CCK all rates: 10 steps (5 dB) + * + * Backoff values apply to saturation txpower on a per-transmitter basis; + * when using MIMO (2 transmitters), each transmitter uses the same + * saturation level provided in EEPROM, and the same backoff values; + * no reduction (such as with regulatory txpower limits) is required. + * + * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel + * widths and 40 Mhz (.11n HT40) channel widths; there is no separate + * factory measurement for ht40 channels. + * + * The result of this step is the final target txpower. The rest of + * the steps figure out the proper settings for the device to achieve + * that target txpower. + * + * + * 3) Determine (EEPROM) calibration sub band for the target channel, by + * comparing against first and last channels in each sub band + * (see struct iwl4965_eeprom_calib_subband_info). + * + * + * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, + * referencing the 2 factory-measured (sample) channels within the sub band. + * + * Interpolation is based on difference between target channel's frequency + * and the sample channels' frequencies. Since channel numbers are based + * on frequency (5 MHz between each channel number), this is equivalent + * to interpolating based on channel number differences. + * + * Note that the sample channels may or may not be the channels at the + * edges of the sub band. The target channel may be "outside" of the + * span of the sampled channels. + * + * Driver may choose the pair (for 2 Tx chains) of measurements (see + * struct iwl4965_eeprom_calib_ch_info) for which the actual measured + * txpower comes closest to the desired txpower. 
Usually, though, + * the middle set of measurements is closest to the regulatory limits, + * and is therefore a good choice for all txpower calculations (this + * assumes that high accuracy is needed for maximizing legal txpower, + * while lower txpower configurations do not need as much accuracy). + * + * Driver should interpolate both members of the chosen measurement pair, + * i.e. for both Tx chains (radio transmitters), unless the driver knows + * that only one of the chains will be used (e.g. only one tx antenna + * connected, but this should be unusual). The rate scaling algorithm + * switches antennas to find best performance, so both Tx chains will + * be used (although only one at a time) even for non-MIMO transmissions. + * + * Driver should interpolate factory values for temperature, gain table + * index, and actual power. The power amplifier detector values are + * not used by the driver. + * + * Sanity check: If the target channel happens to be one of the sample + * channels, the results should agree with the sample channel's + * measurements! + * + * + * 5) Find difference between desired txpower and (interpolated) + * factory-measured txpower. Using (interpolated) factory gain table index + * (shown elsewhere) as a starting point, adjust this index lower to + * increase txpower, or higher to decrease txpower, until the target + * txpower is reached. Each step in the gain table is 1/2 dB. + * + * For example, if factory measured txpower is 16 dBm, and target txpower + * is 13 dBm, add 6 steps to the factory gain index to reduce txpower + * by 3 dB. + * + * + * 6) Find difference between current device temperature and (interpolated) + * factory-measured temperature for sub-band. Factory values are in + * degrees Celsius. To calculate current temperature, see comments for + * "4965 temperature calculation". + * + * If current temperature is higher than factory temperature, driver must + * increase gain (lower gain table index), and vice verse. + * + * Temperature affects gain differently for different channels: + * + * 2.4 GHz all channels: 3.5 degrees per half-dB step + * 5 GHz channels 34-43: 4.5 degrees per half-dB step + * 5 GHz channels >= 44: 4.0 degrees per half-dB step + * + * NOTE: Temperature can increase rapidly when transmitting, especially + * with heavy traffic at high txpowers. Driver should update + * temperature calculations often under these conditions to + * maintain strong txpower in the face of rising temperature. + * + * + * 7) Find difference between current power supply voltage indicator + * (from "initialize alive") and factory-measured power supply voltage + * indicator (EEPROM). + * + * If the current voltage is higher (indicator is lower) than factory + * voltage, gain should be reduced (gain table index increased) by: + * + * (eeprom - current) / 7 + * + * If the current voltage is lower (indicator is higher) than factory + * voltage, gain should be increased (gain table index decreased) by: + * + * 2 * (current - eeprom) / 7 + * + * If number of index steps in either direction turns out to be > 2, + * something is wrong ... just use 0. + * + * NOTE: Voltage compensation is independent of band/channel. + * + * NOTE: "Initialize" uCode measures current voltage, which is assumed + * to be constant after this initial measurement. Voltage + * compensation for txpower (number of steps in gain table) + * may be calculated once and used until the next uCode bootload. 
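Step 7 above amounts to a small signed computation on the two voltage indicators. A minimal sketch with hypothetical names (the divisor 7 corresponds to the TX_POWER_IWL_VOLTAGE_CODES_PER_03V constant defined further down in this header):

/* Illustrative only: voltage compensation per step 7. A positive result
 * is added to the gain-table index (less gain); a negative result means
 * a gain increase. Names are assumptions, not the driver's helper. */
static s32 example_voltage_compensation(s32 eeprom_indicator, s32 current_indicator)
{
	s32 d = eeprom_indicator - current_indicator;
	s32 steps = (d > 0) ? d / 7 : (2 * d) / 7;

	/* more than 2 steps in either direction means something is wrong */
	if (steps < -2 || steps > 2)
		steps = 0;

	return steps;
}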
+ * + * + * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31), + * adjust txpower for each transmitter chain, so txpower is balanced + * between the two chains. There are 5 pairs of tx_atten[group][chain] + * values in "initialize alive", one pair for each of 5 channel ranges: + * + * Group 0: 5 GHz channel 34-43 + * Group 1: 5 GHz channel 44-70 + * Group 2: 5 GHz channel 71-124 + * Group 3: 5 GHz channel 125-200 + * Group 4: 2.4 GHz all channels + * + * Add the tx_atten[group][chain] value to the index for the target chain. + * The values are signed, but are in pairs of 0 and a non-negative number, + * so as to reduce gain (if necessary) of the "hotter" channel. This + * avoids any need to double-check for regulatory compliance after + * this step. + * + * + * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation + * value to the index: + * + * Hardware rev B: 9 steps (4.5 dB) + * Hardware rev C: 5 steps (2.5 dB) + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + * + * NOTE: This compensation is in addition to any saturation backoff that + * might have been applied in an earlier step. + * + * + * 10) Select the gain table, based on band (2.4 vs 5 GHz). + * + * Limit the adjusted index to stay within the table! + * + * + * 11) Read gain table entries for DSP and radio gain, place into appropriate + * location(s) in command (struct iwl4965_txpowertable_cmd). + */ + +/** + * When MIMO is used (2 transmitters operating simultaneously), driver should + * limit each transmitter to deliver a max of 3 dB below the regulatory limit + * for the device. That is, use half power for each transmitter, so total + * txpower is within regulatory limits. + * + * The value "6" represents number of steps in gain table to reduce power 3 dB. + * Each step is 1/2 dB. + */ +#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +/** + * CCK gain compensation. + * + * When calculating txpowers for CCK, after making sure that the target power + * is within regulatory and saturation limits, driver must additionally + * back off gain by adding these values to the gain table index. + * + * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG, + * bits [3:2], 1 = B, 2 = C. + */ +#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +/* + * 4965 power supply voltage compensation for txpower + */ +#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7) + +/** + * Gain tables. + * + * The following tables contain pair of values for setting txpower, i.e. + * gain settings for the output of the device's digital signal processor (DSP), + * and for the analog gain structure of the transmitter. + * + * Each entry in the gain tables represents a step of 1/2 dB. Note that these + * are *relative* steps, not indications of absolute output power. Output + * power varies with temperature, voltage, and channel frequency, and also + * requires consideration of average power (to satisfy regulatory constraints), + * and peak power (to avoid distortion of the output signal). + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. 
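The CCK backoff in step 9 uses the compensation constants defined just above. A minimal sketch, assuming a simplified hardware-rev check and made-up names:

/* Illustrative only: CCK compensation per step 9. CSR_HW_REV_WA_REG
 * bits [3:2]: 1 = rev B, 2 = rev C; treating any other value as rev C
 * is an assumption of this sketch, not the driver's actual policy. */
static int example_cck_compensate(struct iwl_priv *priv, int index)
{
	u32 rev = (iwl_read32(priv, CSR_HW_REV_WA_REG) >> 2) & 0x3;

	index += (rev == 1) ? IWL_TX_POWER_CCK_COMPENSATION_B_STEP :
			      IWL_TX_POWER_CCK_COMPENSATION_C_STEP;

	/* step 10: the caller must still clamp the index to the gain table */
	return index;
}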
+ * + * EEPROM contains factory calibration data for txpower. This maps actual + * measured txpower levels to gain settings in the "well known" tables + * below ("well-known" means here that both factory calibration *and* the + * driver work with the same table). + * + * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table + * has an extension (into negative indexes), in case the driver needs to + * boost power setting for high device temperatures (higher than would be + * present during factory calibration). A 5 Ghz EEPROM index of "40" + * corresponds to the 49th entry in the table used by the driver. + */ +#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */ +#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */ + +/** + * 2.4 GHz gain table + * + * Index Dsp gain Radio gain + * 0 110 0x3f (highest gain) + * 1 104 0x3f + * 2 98 0x3f + * 3 110 0x3e + * 4 104 0x3e + * 5 98 0x3e + * 6 110 0x3d + * 7 104 0x3d + * 8 98 0x3d + * 9 110 0x3c + * 10 104 0x3c + * 11 98 0x3c + * 12 110 0x3b + * 13 104 0x3b + * 14 98 0x3b + * 15 110 0x3a + * 16 104 0x3a + * 17 98 0x3a + * 18 110 0x39 + * 19 104 0x39 + * 20 98 0x39 + * 21 110 0x38 + * 22 104 0x38 + * 23 98 0x38 + * 24 110 0x37 + * 25 104 0x37 + * 26 98 0x37 + * 27 110 0x36 + * 28 104 0x36 + * 29 98 0x36 + * 30 110 0x35 + * 31 104 0x35 + * 32 98 0x35 + * 33 110 0x34 + * 34 104 0x34 + * 35 98 0x34 + * 36 110 0x33 + * 37 104 0x33 + * 38 98 0x33 + * 39 110 0x32 + * 40 104 0x32 + * 41 98 0x32 + * 42 110 0x31 + * 43 104 0x31 + * 44 98 0x31 + * 45 110 0x30 + * 46 104 0x30 + * 47 98 0x30 + * 48 110 0x6 + * 49 104 0x6 + * 50 98 0x6 + * 51 110 0x5 + * 52 104 0x5 + * 53 98 0x5 + * 54 110 0x4 + * 55 104 0x4 + * 56 98 0x4 + * 57 110 0x3 + * 58 104 0x3 + * 59 98 0x3 + * 60 110 0x2 + * 61 104 0x2 + * 62 98 0x2 + * 63 110 0x1 + * 64 104 0x1 + * 65 98 0x1 + * 66 110 0x0 + * 67 104 0x0 + * 68 98 0x0 + * 69 97 0 + * 70 96 0 + * 71 95 0 + * 72 94 0 + * 73 93 0 + * 74 92 0 + * 75 91 0 + * 76 90 0 + * 77 89 0 + * 78 88 0 + * 79 87 0 + * 80 86 0 + * 81 85 0 + * 82 84 0 + * 83 83 0 + * 84 82 0 + * 85 81 0 + * 86 80 0 + * 87 79 0 + * 88 78 0 + * 89 77 0 + * 90 76 0 + * 91 75 0 + * 92 74 0 + * 93 73 0 + * 94 72 0 + * 95 71 0 + * 96 70 0 + * 97 69 0 + * 98 68 0 + */ + +/** + * 5 GHz gain table + * + * Index Dsp gain Radio gain + * -9 123 0x3F (highest gain) + * -8 117 0x3F + * -7 110 0x3F + * -6 104 0x3F + * -5 98 0x3F + * -4 110 0x3E + * -3 104 0x3E + * -2 98 0x3E + * -1 110 0x3D + * 0 104 0x3D + * 1 98 0x3D + * 2 110 0x3C + * 3 104 0x3C + * 4 98 0x3C + * 5 110 0x3B + * 6 104 0x3B + * 7 98 0x3B + * 8 110 0x3A + * 9 104 0x3A + * 10 98 0x3A + * 11 110 0x39 + * 12 104 0x39 + * 13 98 0x39 + * 14 110 0x38 + * 15 104 0x38 + * 16 98 0x38 + * 17 110 0x37 + * 18 104 0x37 + * 19 98 0x37 + * 20 110 0x36 + * 21 104 0x36 + * 22 98 0x36 + * 23 110 0x35 + * 24 104 0x35 + * 25 98 0x35 + * 26 110 0x34 + * 27 104 0x34 + * 28 98 0x34 + * 29 110 0x33 + * 30 104 0x33 + * 31 98 0x33 + * 32 110 0x32 + * 33 104 0x32 + * 34 98 0x32 + * 35 110 0x31 + * 36 104 0x31 + * 37 98 0x31 + * 38 110 0x30 + * 39 104 0x30 + * 40 98 0x30 + * 41 110 0x25 + * 42 104 0x25 + * 43 98 0x25 + * 44 110 0x24 + * 45 104 0x24 + * 46 98 0x24 + * 47 110 0x23 + * 48 104 0x23 + * 49 98 0x23 + * 50 110 0x22 + * 51 104 0x18 + * 52 98 0x18 + * 53 110 0x17 + * 54 104 0x17 + * 55 98 0x17 + * 56 110 0x16 + * 57 104 0x16 + * 58 98 0x16 + * 59 110 0x15 + * 60 104 0x15 + * 61 98 0x15 + * 62 110 0x14 + * 63 104 0x14 + * 64 98 0x14 + * 65 110 0x13 + * 66 104 0x13 + * 67 98 0x13 + * 68 
110 0x12 + * 69 104 0x08 + * 70 98 0x08 + * 71 110 0x07 + * 72 104 0x07 + * 73 98 0x07 + * 74 110 0x06 + * 75 104 0x06 + * 76 98 0x06 + * 77 110 0x05 + * 78 104 0x05 + * 79 98 0x05 + * 80 110 0x04 + * 81 104 0x04 + * 82 98 0x04 + * 83 110 0x03 + * 84 104 0x03 + * 85 98 0x03 + * 86 110 0x02 + * 87 104 0x02 + * 88 98 0x02 + * 89 110 0x01 + * 90 104 0x01 + * 91 98 0x01 + * 92 110 0x00 + * 93 104 0x00 + * 94 98 0x00 + * 95 93 0x00 + * 96 88 0x00 + * 97 83 0x00 + * 98 78 0x00 + */ + + +/** + * Sanity checks and default values for EEPROM regulatory levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Regulatory limits refer to the maximum average txpower allowed by + * regulatory agencies in the geographies in which the device is meant + * to be operated. These limits are SKU-specific (i.e. geography-specific), + * and channel-specific; each channel has an individual regulatory limit + * listed in the EEPROM. + * + * Units are in half-dBm (i.e. "34" means 17 dBm). + */ +#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IWL_TX_POWER_REGULATORY_MIN (0) +#define IWL_TX_POWER_REGULATORY_MAX (34) + +/** + * Sanity checks and default values for EEPROM saturation levels. + * If EEPROM values fall outside MIN/MAX range, use default values. + * + * Saturation is the highest level that the output power amplifier can produce + * without significant clipping distortion. This is a "peak" power level. + * Different types of modulation (i.e. various "rates", and OFDM vs. CCK) + * require differing amounts of backoff, relative to their average power output, + * in order to avoid clipping distortion. + * + * Driver must make sure that it is violating neither the saturation limit, + * nor the regulatory limit, when calculating Tx power settings for various + * rates. + * + * Units are in half-dBm (i.e. "38" means 19 dBm). + */ +#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IWL_TX_POWER_SATURATION_MIN (20) +#define IWL_TX_POWER_SATURATION_MAX (50) + +/** + * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance) + * and thermal Txpower calibration. + * + * When calculating txpower, driver must compensate for current device + * temperature; higher temperature requires higher gain. Driver must calculate + * current temperature (see "4965 temperature calculation"), then compare vs. + * factory calibration temperature in EEPROM; if current temperature is higher + * than factory temperature, driver must *increase* gain by proportions shown + * in table below. If current temperature is lower than factory, driver must + * *decrease* gain. + * + * Different frequency ranges require different compensation, as shown below. + */ +/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 + +/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 + +/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 + +/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */ +#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 + +/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. 
*/ +#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/********************* END TXPOWER *****************************************/ + + +/** + * Tx/Rx Queues + * + * Most communication between driver and 4965 is via queues of data buffers. + * For example, all commands that the driver issues to device's embedded + * controller (uCode) are via the command queue (one of the Tx queues). All + * uCode command responses/replies/notifications, including Rx frames, are + * conveyed from uCode to driver via the Rx queue. + * + * Most support for these queues, including handshake support, resides in + * structures in host DRAM, shared between the driver and the device. When + * allocating this memory, the driver must make sure that data written by + * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's + * cache memory), so DRAM and cache are consistent, and the device can + * immediately see changes made by the driver. + * + * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via + * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array + * in DRAM containing 256 Transmit Frame Descriptors (TFDs). + */ +#define IWL49_NUM_FIFOS 7 +#define IWL49_CMD_FIFO_NUM 4 +#define IWL49_NUM_QUEUES 16 +#define IWL49_NUM_AMPDU_QUEUES 8 + + +/** + * struct iwl4965_schedq_bc_tbl + * + * Byte Count table + * + * Each Tx queue uses a byte-count table containing 320 entries: + * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that + * duplicate the first 64 entries (to avoid wrap-around within a Tx window; + * max Tx window is 64 TFDs). + * + * When driver sets up a new TFD, it must also enter the total byte count + * of the frame to be transmitted into the corresponding entry in the byte + * count table for the chosen Tx queue. If the TFD index is 0-63, the driver + * must duplicate the byte count entry in corresponding index 256-319. + * + * padding puts each byte count table on a 1024-byte boundary; + * 4965 assumes tables are separated by 1024 bytes. + */ +struct iwl4965_scd_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; + u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; +} __packed; + + +#define IWL4965_RTC_INST_LOWER_BOUND (0x000000) + +/* RSSI to dBm */ +#define IWL4965_RSSI_OFFSET 44 + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +#define IWL4965_DEFAULT_TX_RETRY 15 + +/* Limit range of txpower output target to be between these values */ +#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ + +/* EEPROM */ +#define IWL4965_FIRST_AMPDU_QUEUE 10 + + +#endif /* !__iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c new file mode 100644 index 000000000000..26d324e30692 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c @@ -0,0 +1,74 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
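The duplication rule for the byte-count table described above is easy to get wrong; a minimal sketch of it, with an assumed helper name (the real driver performs this update from its Tx scheduler path):

/* Illustrative only: write one TFD's byte count, mirroring entries 0-63
 * at 256-319 as required above. Name and arguments are assumptions. */
static void example_set_byte_cnt(struct iwl4965_scd_bc_tbl *bc_tbl,
				 int tfd_idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);

	/* first 64 entries are duplicated past the end of the 256-entry ring */
	if (tfd_idx < 64)
		bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
}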
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-4965-led.h" + +/* Send led command */ +static int +iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + u32 reg; + + reg = iwl_read32(priv, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); + + return iwl_legacy_send_cmd(priv, &cmd); +} + +/* Set led register off */ +void iwl4965_led_enable(struct iwl_priv *priv) +{ + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); +} + +const struct iwl_led_ops iwl4965_led_ops = { + .cmd = iwl4965_send_led_cmd, +}; diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h new file mode 100644 index 000000000000..5ed3615fc338 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h @@ -0,0 +1,33 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_4965_led_h__ +#define __iwl_4965_led_h__ + +extern const struct iwl_led_ops iwl4965_led_ops; +void iwl4965_led_enable(struct iwl_priv *priv); + +#endif /* __iwl_4965_led_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c new file mode 100644 index 000000000000..c1a24946715e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c @@ -0,0 +1,1260 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-sta.h" + +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status) +{ + if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { + IWL_ERR(priv, "Tx flush command to flush out all frames\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->tx_flush); + } +} + +/* + * EEPROM + */ +struct iwl_mod_params iwl4965_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual 
= 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; + + if (priv->cfg->mod_params->amsdu_size_8K) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write index */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size| + (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + return 0; +} + +static void iwl4965_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do: + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + */ + + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +int iwl4965_hw_nic_init(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + int ret; + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_set_pwr_vmain(priv); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + ret = iwl_legacy_rx_queue_alloc(priv); + if (ret) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl4965_rx_queue_reset(priv, rxq); + + iwl4965_rx_replenish(priv); + + iwl4965_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Allocate or reset and init all Tx and Command queues */ + if (!priv->txq) { + ret = iwl4965_txq_ctx_alloc(priv); + if (ret) + return ret; + } else + iwl4965_txq_ctx_reset(priv); + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/** + * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * 
and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, + rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "alloc_pages failed, " + "order: %d\n", + priv->hw_params.rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, + "Failed to alloc_pages with %s. " + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
+ "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl4965_rx_replenish(struct iwl_priv *priv) +{ + unsigned long flags; + + iwl4965_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl4965_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +void iwl4965_rx_replenish_now(struct iwl_priv *priv) +{ + iwl4965_rx_allocate(priv, GFP_ATOMIC); + + iwl4965_rx_queue_restock(priv); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + +int iwl4965_rxq_stop(struct iwl_priv *priv) +{ + + /* stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + + return 0; +} + +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +static int iwl4965_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. 
*/ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) + >> IWL49_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) + >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. */ + return max_rssi - agc - IWL4965_RSSI_OFFSET; +} + + +static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!priv->cfg->mod_params->sw_crypto && + iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + iwl_legacy_update_stats(priv, false, fc, len); + 
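+	/* The payload itself is not copied: the 128-byte skb allocated above
+	 * only carries the Rx page as a fragment, and the rx_status is passed
+	 * to mac80211 through the skb control buffer just below. */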
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +/* Called for REPLY_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. + * REPLY_RX: physical layer info is in this buffer + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate + * command and cached in priv->last_phy_res + * + * Here we set up local variables depending on which command is + * received. + */ + if (pkt->hdr.cmd == REPLY_RX) { + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!priv->_4965.last_phy_res_valid) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = &priv->_4965.last_phy_res; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = iwl4965_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.rate_idx = + iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_TSFT;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwl4965_calc_rssi(priv, phy_res); + + iwl_legacy_dbg_log_rx_data_frame(priv, len, header); + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", + rx_status.signal, (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. 
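+ * (7 is simply the bitmask 0b111, i.e. the frame was decoded on chains
+ * A, B and C alike; it is not an antenna index.)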
+ * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. + */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + priv->_4965.last_phy_res_valid = true; + memcpy(&priv->_4965.last_phy_res, pkt->u.raw, + sizeof(struct iwl_rx_phy_res)); +} + +static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_supported_band *sband; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added = 0; + u16 channel = 0; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) { + IWL_ERR(priv, "invalid band\n"); + return added; + } + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + channel = iwl_legacy_get_single_channel_number(priv, band); + if (channel) { + scan_ch->channel = cpu_to_le16(channel); + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + added++; + } else + IWL_ERR(priv, "no valid channel found\n"); + return added; +} + +static int iwl4965_get_channels_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = chan->hw_value; + scan_ch->channel = cpu_to_le16(channel); + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + channel); + continue; + } + + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || + 
(chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", + channel, le32_to_cpu(scan_ch->type), + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + "ACTIVE" : "PASSIVE", + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl_scan_cmd *scan; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 rate_flags = 0; + u16 cmd_len; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = priv->hw_params.valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; + int ret; + + lockdep_assert_held(&priv->mutex); + + if (vif) + ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, + "fail to allocate memory for scan\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_legacy_is_any_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + if (priv->is_internal_short_scan) + interval = 0; + else + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); + } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + 
scan->tx_cmd.sta_id = ctx->bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = le32_to_cpu( + priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + if (chan_mod == CHANNEL_MODE_PURE_40) { + rate = IWL_RATE_6M_PLCP; + } else { + rate = IWL_RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + break; + case IEEE80211_BAND_5GHZ: + rate = IWL_RATE_6M_PLCP; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scanning is requested but a certain channel is + * marked passive, we can do active scanning if we detect + * transmissions. + * + * There is an issue with some firmware versions that triggers + * a sysassert on a "good CRC threshold" of zero (== disabled), + * on a radar channel even though this means that we should NOT + * send probes. + * + * The "good CRC threshold" is the number of frames that we + * need to receive during our dwell time on a channel before + * sending out probes -- setting this to a huge value will + * mean we never reach it, but at the same time work around + * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER + * here instead of IWL_GOOD_CRC_TH_DISABLED. + */ + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_NEVER; + + band = priv->scan_band; + + if (priv->cfg->scan_rx_antennas[band]) + rx_ant = priv->cfg->scan_rx_antennas[band]; + + if (priv->cfg->scan_tx_antennas[band]) + scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; + + priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv, + priv->scan_tx_ant[band], + scan_tx_antennas); + rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]); + scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags); + + /* In power save mode use one chain, otherwise use all chains */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = rx_ant & + ((u8)(priv->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", + priv->chain_noise_data.active_chains); + + rx_ant = iwl4965_first_antenna(active_chains); + } + + /* MIMO is not used here, but value is required */ + rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + if (!priv->is_internal_short_scan) { + cmd_len = iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + vif->addr, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + } else { + /* use bcast addr, will not be transmitted but must be valid */ + cmd_len = iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + iwl_bcast_addr, NULL, 0, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + + } + scan->tx_cmd.len = cpu_to_le16(cmd_len); + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + + if (priv->is_internal_short_scan) { + scan->channel_count = + iwl4965_get_single_channel_for_scan(priv, vif, band, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } else { + scan->channel_count = + 
iwl4965_get_channels_for_scan(priv, vif, band, + is_active, n_probes, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + clear_bit(STATUS_SCAN_HW, &priv->status); + + return ret; +} + +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (add) + return iwl4965_add_bssid_station(priv, vif_priv->ctx, + vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed) +{ + lockdep_assert_held(&priv->sta_lock); + + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", + priv->stations[sta_id].tid[tid].tfds_in_queue, + freed); + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} + +#define IWL_TX_QUEUE_MSK 0xfffff + +static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv) +{ + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || + priv->current_ht_config.single_chain_sufficient; +} + +#define IWL_NUM_RX_CHAINS_MULTIPLE 3 +#define IWL_NUM_RX_CHAINS_SINGLE 2 +#define IWL_NUM_IDLE_CHAINS_DUAL 2 +#define IWL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv) +{ + /* # of Rx chains to use when expecting MIMO. */ + if (iwl4965_is_single_rx_stream(priv)) + return IWL_NUM_RX_CHAINS_SINGLE; + else + return IWL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. + */ +static int +iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (priv->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", + priv->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. 
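+ *
+ * Illustrative example: if chain-noise calibration reports antennas A and B
+ * connected and the device is not in power-save, the active count computed
+ * below is clipped to those two valid chains and the idle count follows the
+ * current SMPS mode.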
+ */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + bool is_single = iwl4965_is_single_rx_stream(priv); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl4965_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (priv->chain_noise_data.active_chains) + active_chains = priv->chain_noise_data.active_chains; + else + active_chains = priv->hw_params.valid_rx_ant; + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? */ + active_rx_cnt = iwl4965_get_active_rx_chain_count(priv); + idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt); + + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + ctx->staging.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) + ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", + ctx->staging.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) +{ + int i; + u8 ind = ant; + + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; + if (valid & BIT(ind)) + return ind; + } + return ant; +} + +static const char *iwl4965_get_fh_string(int cmd) +{ + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +} + +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display) +{ + int i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + int pos = 0; + size_t bufsz = 0; +#endif + static const u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return pos; + } +#endif + IWL_ERR(priv, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + IWL_ERR(priv, " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return 0; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c new file mode 100644 index 000000000000..69abd2816f8d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c @@ -0,0 +1,2870 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/wireless.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include "iwl-dev.h" +#include "iwl-sta.h" +#include "iwl-core.h" +#include "iwl-4965.h" + +#define IWL4965_RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ +}; + +static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + + if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy rate 
format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++) + if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx; + } + + return -1; +} + +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); +static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, + bool force_search); + +#ifdef CONFIG_MAC80211_DEBUGFS +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ + {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ + {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ + {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ + {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ +}; + +static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ + {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & 0xFF); +} + +static void +iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the statistics. 
All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. + */ +static void +iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return MAX_TID_COUNT; + + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + return MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +/* + get the traffic load value for tid +*/ +static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + load = iwl4965_rs_tl_get_load(lq_data, tid); + + if (load > IWL_AGG_LOAD_THRESHOLD) { + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } else { + IWL_ERR(priv, "Aggregation not enabled for tid %d " + "because load = %u\n", tid, load); + } + return ret; +} + +static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < TID_MAX_LOAD_COUNT) + 
iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); +} + +static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 +iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * iwl4965_rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = iwl4965_get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
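+ * Legacy entries take the PLCP value from iwl_rates[] (plus the CCK flag
+ * for CCK rates); HT entries set RATE_MCS_HT_MSK with the SISO or MIMO2
+ * PLCP, then antenna, HT40/duplicate, SGI and green-field flags are OR'd in.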
+ */ +static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(priv, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwl_rates[index].plcp_siso; + else + rate_n_flags |= iwl_rates[index].plcp_mimo2; + } else { + IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(priv, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (iwl4965_num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = iwl4965_rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (iwl4965_num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else { + if (iwl4965_num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; +} + +/** + * 
Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static bool iwl4965_rs_use_green(struct ieee80211_sta *sta) +{ + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ctx->ht.non_gf_sta_present); +} + +/** + * iwl4965_rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else + return lq_sta->active_mimo2_rate; + } +} + +static u16 +iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_priv *priv = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (iwl4965_num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv, + scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} + +/* + * mac80211 sends us Tx status + */ +static void +iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE_LIMIT(priv, + "get frame ack response, update rate scale window\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!lq_sta) { + IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
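+ *
+ * The comparison below checks SGI, HT40, duplicate, antenna, HT/GF flags and
+ * the rate index against rs_table[0] of the last LQ command; after
+ * IWL_MISSED_RATE_MAX consecutive mismatches the LQ command is re-sent to
+ * resync the uCode.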
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, + priv->band, &tbl_type, &rs_index); + if (priv->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (priv->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != + !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != + !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.is_dup != + !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || + (tbl_type.ant_type != info->antenna_sel_tx) || + (!!(tx_rate & RATE_MCS_HT_MSK) != + !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_MCS_GF_MSK) != + !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(priv, + "initial rate %d does not match %d (0x%x)\n", + mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, + CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (iwl4965_table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (iwl4965_table_type_matches(&tbl_type, + &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(priv, + "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", + tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + iwl4965_rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. 
+ */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, + &rs_index); + iwl4965_rs_collect_tx_data(curr_tbl, rs_index, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (iwl4965_table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (iwl4965_table_type_matches(&tbl_type, + other_tbl)) + tmp_tbl = other_tbl; + else + continue; + iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
+ */ +static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
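+ *
+ * Starting from the current index, the loop below walks to adjacent rates
+ * via iwl4965_rs_get_adjacent_rate() until the candidate's expected
+ * throughput satisfies the two conditions documented in the loop.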
+ */ +static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) + == WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 2) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + 
struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + tbl->action = IWL_LEGACY_SWITCH_SISO; + + start_action = tbl->action; + for (; ;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + iwl4965_rs_set_expected_tpt_table(lq_sta, + search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; + +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = 
priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(priv, + "SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); + + 
if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void +iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_priv *priv; + + priv = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
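Condensed, the "should we leave this table?" test that follows checks three things: too many failures, too many successes, or an expired flush interval. A hedged sketch of that condition (the force-search and pending-search qualifiers of the real test are simplified away; field names are illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

static bool stay_period_expired(unsigned long flush_timer,
                                unsigned long interval,
                                u32 failed, u32 max_failed,
                                u32 success, u32 max_success)
{
        return failed > max_failed ||
               success > max_success ||
               (flush_timer && time_after(jiffies, flush_timer + interval));
}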
+ */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(priv, + "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + * return rate_n_flags as used in the table + */ +static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green); + iwl4965_rs_fill_link_cmd(priv, lq_sta, rate); + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + + return rate; +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u32 rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = MAX_TID_COUNT; + struct iwl_tid_data *tid_data; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + if (!sta || !lq_sta) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = iwl4965_rs_tl_add_packet(lq_sta, hdr); + if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = iwl4965_rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(priv, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " + "for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
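Once enough frames have been sampled, the measured average throughput is derived a few lines below by weighting the table's ideal throughput with the window's success ratio. Assuming the success ratio is kept as a percentage scaled by 128 (as the 100x comparisons elsewhere in this file suggest), the arithmetic looks like this; the helper name is illustrative:

#include <linux/types.h>

static inline s32 estimated_tpt(s32 success_ratio, s32 expected_tpt)
{
        /* the +64 rounds the division by 128 to nearest */
        return (success_ratio * expected_tpt + 64) / 128;
}

/* e.g. 48 of 64 frames acked -> 75% -> success_ratio 9600;
 * estimated_tpt(9600, 40) == 3000, i.e. 75% of 40 in 100x units */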
*/ + iwl4965_rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(priv, + "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + + IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = iwl4965_rs_get_adjacent_rate(priv, index, + rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! 
*/ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. */ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + + /* Should we stay with this modulation mode, + * or search for a new one? */ + iwl4965_rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && + !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + iwl4965_rs_move_legacy_other(priv, lq_sta, + conf, sta, index); + else if (is_siso(tbl->lq_type)) + iwl4965_rs_move_siso_to_other(priv, lq_sta, + conf, sta, index); + else /* (is_mimo2(tbl->lq_type)) */ + iwl4965_rs_move_mimo2_to_other(priv, lq_sta, + conf, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. 
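Taken together, the rate-adjustment branches above boil down to a small hill-climbing rule. A condensed sketch, ignoring the neighbouring-rate availability checks and the final sanity check; the threshold parameters stand in for the IWL_RATE_*_TH constants:

#include <linux/types.h>

#define TPT_INVALID (-1)

static int pick_scale_action(s32 sr, s32 cur_tpt, s32 low_tpt, s32 high_tpt,
                             s32 decrease_th, s32 increase_th)
{
        if (sr <= decrease_th || cur_tpt == 0)
                return -1;              /* poor success ratio: step down */
        if (low_tpt == TPT_INVALID && high_tpt == TPT_INVALID)
                return sr >= increase_th ? 1 : 0;   /* nothing known: probe up */
        if (high_tpt != TPT_INVALID)
                return (high_tpt > cur_tpt && sr >= increase_th) ? 1 : 0;
        /* only the lower neighbour is measured from here on */
        if (low_tpt > cur_tpt)
                return -1;              /* slower rate measured better */
        return sr >= increase_th ? 1 : 0;
}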
*/ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(priv, + "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + iwl4965_rs_fill_link_cmd(priv, lq_sta, + tbl->current_rate); + iwl_legacy_send_lq_cmd(priv, ctx, + &lq_sta->lq, CMD_ASYNC, false); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); + iwl4965_rs_set_stay_in_table(priv, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != MAX_TID_COUNT)) { + tid_data = + &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(priv, + "try to aggregate tid %d\n", + tid); + iwl4965_rs_tl_turn_on_agg(priv, tid, + lq_sta, sta); + } + } + iwl4965_rs_set_stay_in_table(priv, 0, lq_sta); + } + } + +out: + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, + index, is_green); + i = index; + lq_sta->last_txrate_idx = i; +} + +/** + * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
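The initial entry programmed by the function below is an ordinary rate_n_flags word: the PLCP value of the chosen rate, the transmit antenna in the RATE_MCS_ANT field, and the CCK flag for the 1-11 Mbit/s rates. A hedged sketch of that composition (RATE_MCS_ANT_POS and RATE_MCS_CCK_MSK come from the driver's command headers; the helper itself is illustrative):

#include <linux/types.h>

static u32 build_initial_rate_n_flags(u32 plcp, u8 ant_type, bool is_cck)
{
        u32 rate = plcp | ((u32)ant_type << RATE_MCS_ANT_POS);

        if (is_cck)
                rate |= RATE_MCS_CCK_MSK;
        return rate;
}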
+ */ +static void iwl4965_rs_initialize_lq(struct iwl_priv *priv, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = iwl4965_rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + struct iwl_station_priv *sta_priv; + struct iwl_rxon_context *ctx; + + if (!sta || !lq_sta) + return; + + sta_priv = (void *)sta->drv_priv; + ctx = sta_priv->common.ctx; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = priv->hw_params.valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwl_rates[i].plcp; + tbl->ant_type = iwl4965_first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); + tbl->current_rate = rate; + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate); + priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); +} + +static void +iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = priv_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + priv_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; + if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + +} + +static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_lq_sta *lq_sta; + struct iwl_station_priv *sta_priv = + (struct iwl_station_priv *) sta->drv_priv; + struct iwl_priv *priv; + + priv = (struct iwl_priv *)priv_rate; + IWL_DEBUG_RATE(priv, "create station rate scale window\n"); + + lq_sta = &sta_priv->lq_sta; + + return lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void +iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, + u8 sta_id) +{ + int i, j; + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_station_priv *sta_priv; + struct iwl_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + + sta_priv = (struct iwl_station_priv *) sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + + lq_sta->lq.sta_id = sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + IWL_DEBUG_RATE(priv, "LQ:" + "*** rate scale station global init for station %d ***\n", + sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = iwl4965_rs_use_green(sta); + lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); + lq_sta->band = priv->band; + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. 
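The conversion described above — HT MCS bitmap in, driver rate bitmap out — keeps MCS0 on the 6 Mbit/s slot, clears the 9 Mbit/s slot that the MCS space does not have, and shifts everything past the four CCK rates. A self-contained sketch mirroring the statements below (the helper name is illustrative):

#include <linux/types.h>

static u16 ht_mcs_to_rate_mask(u8 rx_mask, unsigned int first_ofdm_rate)
{
        u16 m = ((u16)rx_mask << 1) | (rx_mask & 0x1); /* keep MCS0 at bit 0 */

        m &= ~(u16)0x2;                 /* no 9 Mbit/s equivalent in HT */
        return m << first_ofdm_rate;    /* skip the CCK rate indices */
}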
+ */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!lq_sta->lq.general_params.dual_stream_ant_msk) { + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = priv; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta); +} + +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = + tbl_type.ant_type; + } /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + if (priv) + valid_tx_ant = priv->hw_params.valid_tx_ant; + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. 
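The shape of the retry table being built here: the current rate is repeated a fixed number of times (once for legacy, three times for HT in this driver), then a lower rate is chosen, until all LINK_QUAL_MAX_RETRY_NUM entries are filled. A simplified sketch that leaves out the antenna toggling and the little-endian conversion of the stored words; the function-pointer parameter stands in for iwl4965_rs_get_lower_rate():

#include <linux/types.h>

static void fill_retry_table(u32 *rs_table, int n_entries, u32 rate,
                             int repeat, u32 (*next_lower_rate)(u32))
{
        int index = 0;

        while (index < n_entries) {
                int r;

                /* repeat the current rate, then fall back to a lower one */
                for (r = 0; r < repeat && index < n_entries; r++)
                        rs_table[index++] = rate; /* real cmd stores cpu_to_le32() */
                rate = next_lower_rate(rate);
        }
}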
*/ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + iwl4965_rs_get_tbl_info_from_mcs(new_rate, + lq_sta->band, &tbl_type, + &rate_idx); + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + /* Get next rate */ + new_rate = iwl4965_rs_get_lower_rate(lq_sta, + &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void +*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void iwl4965_rs_free(void *priv_rate) +{ + return; +} + +static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl_priv *priv __maybe_unused = priv_r; + + IWL_DEBUG_RATE(priv, "enter\n"); + IWL_DEBUG_RATE(priv, "leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl4965_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_priv *priv; + u8 valid_tx_ant; + u8 ant_sel_tx; + + priv = lq_sta->drv; + valid_tx_ant = priv->hw_params.valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(priv, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + char buf[64]; + 
int buf_size; + u32 parsed_rate; + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + priv = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, + false); + } + + return count; +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + priv = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff+desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); + desc += sprintf(buff+desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff+desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? 
"AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_index[0], + lq_sta->lq.general_params.start_rate_index[1], + lq_sta->lq.general_params.start_rate_index[2], + lq_sta->lq.general_params.start_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl4965_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, + " rate[%d] 0x%X %smbps (%s)\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = iwl4965_rs_sta_dbgfs_scale_table_write, + .read = iwl4965_rs_sta_dbgfs_scale_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; +static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl4965_rs_sta_dbgfs_stats_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[120]; + int desc = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + + priv = lq_sta->drv; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
+ */ +static void +iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ +} +static struct rate_control_ops rs_4965_ops = { + .module = NULL, + .name = IWL4965_RS_NAME, + .tx_status = iwl4965_rs_tx_status, + .get_rate = iwl4965_rs_get_rate, + .rate_init = iwl4965_rs_rate_init_stub, + .alloc = iwl4965_rs_alloc, + .free = iwl4965_rs_free, + .alloc_sta = iwl4965_rs_alloc_sta, + .free_sta = iwl4965_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl4965_rs_add_debugfs, + .remove_sta_debugfs = iwl4965_rs_remove_debugfs, +#endif +}; + +int iwl4965_rate_control_register(void) +{ + pr_err("Registering 4965 rate control operations\n"); + return ieee80211_rate_control_register(&rs_4965_ops); +} + +void iwl4965_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_4965_ops); +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c new file mode 100644 index 000000000000..b9fa2f6411a7 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c @@ -0,0 +1,291 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-4965-calib.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" + +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, + "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl4965_init_sensitivity(priv); + } +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. 
This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl4965_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + rx_info = &(priv->_4965.statistics.rx.general); + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + last_rx_noise); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. + */ +static void iwl4965_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ + int i, size; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + struct statistics_general_common *general, *accum_general; + struct statistics_tx *tx, *accum_tx; + + prev_stats = (__le32 *)&priv->_4965.statistics; + accum_stats = (u32 *)&priv->_4965.accum_statistics; + size = sizeof(struct iwl_notif_statistics); + general = &priv->_4965.statistics.general.common; + accum_general = &priv->_4965.accum_statistics.general.common; + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta = (u32 *)&priv->_4965.delta_statistics; + max_delta = (u32 *)&priv->_4965.max_delta; + + for (i = sizeof(__le32); i < size; + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; + } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + accum_general->temperature = general->temperature; + accum_general->ttl_timestamp = general->ttl_timestamp; +} +#endif + +#define REG_RECALIB_PERIOD (60) + +/** + * iwl4965_good_plcp_health - checks for plcp error. + * + * When the plcp error is exceeding the thresholds, reset the radio + * to improve the throughput. + */ +bool iwl4965_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + bool rc = true; + int combined_plcp_delta; + unsigned int plcp_msec; + unsigned long plcp_received_jiffies; + + if (priv->cfg->base_params->plcp_delta_threshold == + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { + IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); + return rc; + } + + /* + * check for plcp_err and trigger radio reset if it exceeds + * the plcp error threshold plcp_delta. 
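The health check below normalises the growth of the OFDM and OFDM-HT PLCP error counters (relative to the previous statistics notification) to errors per 100 ms and compares that with the configured threshold. A condensed sketch of the arithmetic; the helper name is illustrative:

#include <linux/types.h>

static bool plcp_rate_ok(u32 delta_ofdm, u32 delta_ofdm_ht,
                         unsigned int msecs, u32 threshold)
{
        u32 combined = delta_ofdm + delta_ofdm_ht;

        if (!msecs)
                return true;    /* nothing to normalise against yet */
        /* errors per 100 ms, compared against plcp_delta_threshold */
        return (combined * 100) / msecs <= threshold;
}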
+ */ + plcp_received_jiffies = jiffies; + plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - + (long) priv->plcp_jiffies); + priv->plcp_jiffies = plcp_received_jiffies; + /* + * check to make sure plcp_msec is not 0 to prevent division + * by zero. + */ + if (plcp_msec) { + struct statistics_rx_phy *ofdm; + struct statistics_rx_ht_phy *ofdm_ht; + + ofdm = &pkt->u.stats.rx.ofdm; + ofdm_ht = &pkt->u.stats.rx.ofdm_ht; + combined_plcp_delta = + (le32_to_cpu(ofdm->plcp_err) - + le32_to_cpu(priv->_4965.statistics. + rx.ofdm.plcp_err)) + + (le32_to_cpu(ofdm_ht->plcp_err) - + le32_to_cpu(priv->_4965.statistics. + rx.ofdm_ht.plcp_err)); + + if ((combined_plcp_delta > 0) && + ((combined_plcp_delta * 100) / plcp_msec) > + priv->cfg->base_params->plcp_delta_threshold) { + /* + * if plcp_err exceed the threshold, + * the following data is printed in csv format: + * Text: plcp_err exceeded %d, + * Received ofdm.plcp_err, + * Current ofdm.plcp_err, + * Received ofdm_ht.plcp_err, + * Current ofdm_ht.plcp_err, + * combined_plcp_delta, + * plcp_msec + */ + IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " + "%u, %u, %u, %u, %d, %u mSecs\n", + priv->cfg->base_params->plcp_delta_threshold, + le32_to_cpu(ofdm->plcp_err), + le32_to_cpu(ofdm->plcp_err), + le32_to_cpu(ofdm_ht->plcp_err), + le32_to_cpu(ofdm_ht->plcp_err), + combined_plcp_delta, plcp_msec); + + rc = false; + } + } + return rc; +} + +void iwl4965_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_4965.statistics.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((priv->_4965.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); +#endif + + iwl_legacy_recover_from_statistics(priv, pkt); + + memcpy(&priv->_4965.statistics, &pkt->u.stats, + sizeof(priv->_4965.statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl4965_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->cfg->ops->lib->temp_ops.temperature && change) + priv->cfg->ops->lib->temp_ops.temperature(priv); +} + +void iwl4965_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + memset(&priv->_4965.accum_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_4965.delta_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_4965.max_delta, 0, + sizeof(struct iwl_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl4965_rx_statistics(priv, rxb); +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c 
b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c new file mode 100644 index 000000000000..057da2c3bf95 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c @@ -0,0 +1,720 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-4965.h" + +static struct iwl_link_quality_cmd * +iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) +{ + int i, r; + struct iwl_link_quality_cmd *link_cmd; + u32 rate_flags = 0; + __le32 rate_n_flags; + + link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); + if (!link_cmd) { + IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); + return NULL; + } + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + rate_n_flags = iwl4965_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; + + return link_cmd; +} + +/* + * iwl4965_add_bssid_station - Add the special IBSS BSSID station + * + * Function sleeps. 
+ */ +int +iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r) +{ + int ret; + u8 sta_id; + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + /* Set up default rate scaling table in device's station table */ + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for station %pM.\n", + addr); + return -ENOMEM; + } + + ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); + if (ret) + IWL_ERR(priv, "Link quality command failed (%d)\n", ret); + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + bool send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct iwl_wep_cmd) + + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; + struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; + size_t cmd_size = sizeof(struct iwl_wep_cmd); + struct iwl_host_cmd cmd = { + .id = ctx->wep_key_cmd, + .data = wep_cmd, + .flags = CMD_SYNC, + }; + + might_sleep(); + + memset(wep_cmd, 0, cmd_size + + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX ; i++) { + wep_cmd->key[i].key_index = i; + if (ctx->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, + ctx->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; + + cmd.len = cmd_size; + + if (not_empty || send_if_empty) + return iwl_legacy_send_cmd(priv, &cmd); + else + return 0; +} + +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + lockdep_assert_held(&priv->mutex); + + return iwl4965_static_wepkey_cmd(priv, ctx, false); +} + +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); + + memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + /* but keys in device are clear anyway so return success */ + return 0; + } + ret = iwl4965_static_wepkey_cmd(priv, ctx, 1); + IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", + keyconf->keyidx, ret); + + return ret; +} + +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); + 
return -EINVAL; + } + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = HW_KEY_DEFAULT; + priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; + + ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = iwl4965_static_wepkey_cmd(priv, ctx, false); + IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", + keyconf->keylen, keyconf->keyidx, ret); + + return ret; +} + +static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + + key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (keyconf->keylen == WEP_KEY_LEN_128) + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; + + memcpy(priv->stations[sta_id].keyinfo.key, + keyconf->key, keyconf->keylen); + + memcpy(&priv->stations[sta_id].sta.key.key[3], + keyconf->key, keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
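 * In other words, a fresh offset is requested from
 * iwl_legacy_get_free_ucode_key_index() only when the station entry
 * currently carries no encryption key; otherwise the offset already
 * programmed into uCode is reused for the replacement key.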
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = 16; + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + + + /* This copy is acutally not needed: we get the key with each TX */ + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) +{ + u8 sta_id; + unsigned long flags; + int i; + + if (iwl_legacy_scan_cancel(priv)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; + + for (i = 0; i < 5; i++) + priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = + cpu_to_le16(phase1key[i]); + + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} + +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + u16 key_flags; + u8 keyidx; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys--; + + spin_lock_irqsave(&priv->sta_lock, flags); + key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); + keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; + + IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->keyidx != keyidx) { + /* We need to remove a key with index different 
that the one + * in the uCode. This means that the key we need to remove has + * been replaced by another one with different index. + * Don't do anything and return ok + */ + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { + IWL_WARN(priv, "Removing wrong key %d 0x%x\n", + keyconf->keyidx, key_flags); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, + &priv->ucode_key_table)) + IWL_ERR(priv, "index %d not used in uCode key table.\n", + priv->stations[sta_id].sta.key.key_offset); + memset(&priv->stations[sta_id].keyinfo, 0, + sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys++; + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl4965_set_wep_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + default: + IWL_ERR(priv, + "Unknown alg: %s cipher = %x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, + "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +/** + * iwl4965_alloc_bcast_station - add broadcast station into driver's station table. + * + * This adds the broadcast station into the driver's station table + * and marks it driver active, so that it will be restored to the + * device at the next best time. 
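 * As with iwl4965_add_bssid_station() above, a default link quality
 * (rate scaling) table from iwl4965_sta_alloc_lq() is allocated and
 * stored in the driver's station entry for the broadcast station.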
+ */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + sta_id = iwl_legacy_prep_station(priv, ctx, iwl_bcast_addr, + false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +/** + * iwl4965_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwl4965. Placed here to have all bcast station management + * code together. + */ +static int iwl4965_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + unsigned long flags; + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id = ctx->bcast_sta_id; + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->stations[sta_id].lq) + kfree(priv->stations[sta_id].lq); + else + IWL_DEBUG_INFO(priv, + "Bcast station rate scaling has not been initialized yet.\n"); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +int iwl4965_update_bcast_stations(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret = 0; + + for_each_context(priv, ctx) { + ret = iwl4965_update_bcast_station(priv, ctx); + if (ret) + break; + } + + return ret; +} + +/** + * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +{ + unsigned long flags; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, 
&priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +void +iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = + STA_MODIFY_SLEEP_TX_COUNT_MSK; + priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c new file mode 100644 index 000000000000..2e0f0dbf87ec --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c @@ -0,0 +1,1359 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" + +/* + * mac80211 queues, ACs, hardware queues, FIFOs. + * + * Cf. 
http://wireless.kernel.org/en/developers/Documentation/mac80211/queues + * + * Mac80211 uses the following numbers, which we get as from it + * by way of skb_get_queue_mapping(skb): + * + * VO 0 + * VI 1 + * BE 2 + * BK 3 + * + * + * Regular (not A-MPDU) frames are put into hardware queues corresponding + * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their + * own queue per aggregation session (RA/TID combination), such queues are + * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In + * order to map frames to the right queue, we also need an AC->hw queue + * mapping. This is implemented here. + * + * Due to the way hw queues are set up (by the hw specific modules like + * iwl-4965.c), the AC->hw queue mapping is the identity + * mapping. + */ + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO +}; + +static inline int iwl4965_get_ac_from_tid(u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return tid_to_ac[tid]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +static inline int +iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return ctx->ac_to_fifo[tid_to_ac[tid]]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +/* + * handle build REPLY_TX command notification. + */ +static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv, + struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + u8 std_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +#define RTS_DFAULT_RETRY_LIMIT 60 + +static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 rate_plcp; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL4965_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + /* Set retry limit on RTS packets */ + 
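	/* For example: a probe response gets data_retry_limit = 3 above,
	 * so the RTS retry limit is clamped from RTS_DFAULT_RETRY_LIMIT
	 * (60) down to 3 here as well. */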
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * index is invalid. + */ + rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || + (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) + rate_idx = rate_lowest_index(&priv->bands[info->band], + info->control.sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwl_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up antennas */ + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + + rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); +} + +static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_key(keyconf, skb_frag, + IEEE80211_TKIP_P2_KEY, tx_cmd->key); + IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | + (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", keyconf->keyidx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/* + * start REPLY_TX command process + */ +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; + struct iwl_station_priv *sta_priv = NULL; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + struct iwl_tx_cmd *tx_cmd; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int txq_id; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, firstlen, secondlen; + u16 seq_number = 0; + __le16 fc; + u8 
hdr_len; + u8 sta_id; + u8 wait_write_ptr = 0; + u8 tid = 0; + u8 *qc = NULL; + unsigned long flags; + bool is_agg = false; + + if (info->control.vif) + ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif); + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + hdr_len = ieee80211_hdrlen(fc); + + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; + } + + IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_priv->asleep && + (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) { + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. + */ + iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1); + } + + /* + * Send this frame after DTIM -- there's a special queue + * reserved for this for contexts that support AP mode. + */ + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + txq_id = ctx->mcast_queue; + /* + * The microcode will clear the more data + * bit in the last frame it transmits. 
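	 * Frames flagged IEEE80211_TX_CTL_SEND_AFTER_DTIM are therefore
	 * routed to the context's ctx->mcast_queue here, while all other
	 * frames use the AC queue chosen from skb_get_queue_mapping()
	 * just below.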
+ */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else + txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; + + /* irqs already disabled/saved above when locking priv->lock */ + spin_lock(&priv->sta_lock); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + seq_number = priv->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this <sta,tid> */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; + is_agg = true; + } + } + + txq = &priv->txq[txq_id]; + q = &txq->q; + + if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + + if (ieee80211_is_data_qos(fc)) { + priv->stations[sta_id].tid[tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + + spin_unlock(&priv->sta_lock); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = ctx; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[q->write_ptr]; + out_meta = &txq->meta[q->write_ptr]; + tx_cmd = &out_cmd->cmd.tx; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + + iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc); + + iwl_legacy_update_stats(priv, true, fc, len); + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + firstlen = (len + 3) & ~3; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (firstlen != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. 
*/ + txcmd_phys = pci_map_single(priv->pci_dev, + &out_cmd->hdr, firstlen, + PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, firstlen, 1, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). */ + secondlen = skb->len - hdr_len; + if (secondlen > 0) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + secondlen, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, secondlen, + 0, 0); + } + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys); + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + + trace_iwlwifi_legacy_dev_tx(priv, + &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &out_cmd->hdr, firstlen, + skb->data + hdr_len, secondlen); + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. + */ + + /* + * Avoid atomic ops if it isn't an associated client. + * Also, if this is a packet for aggregation, don't + * increase the counter because the ucode will stop + * aggregation queues when their respective station + * goes to sleep. 
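	 * The pending_frames counter incremented below is the one that
	 * iwl4965_non_agg_tx_status() later decrements; once it drops to
	 * zero, ieee80211_sta_block_awake() lets mac80211 know the
	 * client's queued frames have drained.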
+ */ + if (sta_priv && sta_priv->client && !is_agg) + atomic_inc(&sta_priv->pending_frames); + + if ((iwl_legacy_queue_space(q) < q->high_mark) && + priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } else { + iwl_legacy_stop_queue(priv, txq); + } + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return -1; +} + +static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr, size_t size) +{ + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, + GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/** + * iwl4965_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) { + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_legacy_cmd_queue_free(priv); + else + iwl_legacy_tx_queue_free(priv, txq_id); + } + iwl4965_free_dma_ptr(priv, &priv->kw); + + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + + /* free tx queue structure */ + iwl_legacy_txq_mem(priv); +} + +/** + * iwl4965_txq_ctx_alloc - allocate TX queue context + * Allocate all Tx DMA structures and initialize them + * + * @param priv + * @return error code + */ +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv) +{ + int ret; + int txq_id, slots_num; + unsigned long flags; + + /* Free all tx/cmd queues and keep-warm buffer */ + iwl4965_hw_txq_ctx_free(priv); + + ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls, + priv->hw_params.scd_bc_tbls_size); + if (ret) { + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); + goto error_bc_tbls; + } + /* Alloc keep-warm buffer */ + ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(priv, "Keep Warm allocation failed\n"); + goto error_kw; + } + + /* allocate tx queue structure */ + ret = iwl_legacy_alloc_txq_mem(priv); + if (ret) + goto error; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == priv->cmd_queue) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_legacy_tx_queue_init(priv, + &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return ret; + + error: + iwl4965_hw_txq_ctx_free(priv); + iwl4965_free_dma_ptr(priv, &priv->kw); + error_kw: + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + error_bc_tbls: + return ret; +} + +void iwl4965_txq_ctx_reset(struct iwl_priv *priv) +{ + int txq_id, slots_num; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = txq_id == priv->cmd_queue ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id], + slots_num, txq_id); + } +} + +/** + * iwl4965_txq_ctx_stop - Stop all Tx DMA channels + */ +void iwl4965_txq_ctx_stop(struct iwl_priv *priv) +{ + int ch; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&priv->lock, flags); + + iwl4965_txq_set_sched(priv, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), + 1000)) + IWL_ERR(priv, "Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + iwl_legacy_read_direct32(priv, + FH_TSSR_TX_STATUS_REG)); + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* + * Find first available (lowest unused) Tx Queue, mark it "active". + * Called only when finding queue for aggregation. + * Should never return anything < 7, because they should already + * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) + */ +static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +/** + * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration + */ +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, + u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ + iwl_legacy_write_prph(priv, + IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +/** + * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue + */ +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue + * + * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, + * i.e. it must be one of the higher queues used for aggregation + */ +static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + int ret; + + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; + + spin_lock_irqsave(&priv->lock, flags); + + /* Stop this Tx queue before configuring it */ + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Place first TFD at index corresponding to start sequence number. 
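	 * The ring index wraps at 256 entries (hence the "& 0xff"
	 * below); e.g. an ssn_idx of 0x1a5 (illustrative) puts both the
	 * read and write pointers at entry 0xa5.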
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + /* Set up Tx window size and frame limit for this queue */ + iwl_legacy_write_targ_mem(priv, + priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + + +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + int sta_id; + int tx_fifo; + int txq_id; + int ret; + unsigned long flags; + struct iwl_tid_data *tid_data; + + tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo < 0)) + return tx_fifo; + + IWL_WARN(priv, "%s on ra = %pM tid = %d\n", + __func__, sta->addr, tid); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { + IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = iwl4965_txq_ctx_activate_free(priv); + if (txq_id == -1) { + IWL_ERR(priv, "No free aggregation queue available\n"); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + iwl_legacy_set_swq_id(&priv->txq[txq_id], + iwl4965_get_ac_from_tid(tid), txq_id); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, *ssn); + if (ret) + return ret; + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + IWL_DEBUG_HT(priv, + "HW queue is NOT empty: %d packets in HW queue\n", + tid_data->tfds_in_queue); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE + * priv->lock must be held by the caller + */ +static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + 
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; +} + +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + int tx_fifo_id, txq_id, sta_id, ssn; + struct iwl_tid_data *tid_data; + int write_ptr, read_ptr; + unsigned long flags; + + tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo_id < 0)) + return tx_fifo_id; + + sta_id = iwl_legacy_sta_id(sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. + */ + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); + } + + write_ptr = priv->txq[txq_id].q.write_ptr; + read_ptr = priv->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); + priv->stations[sta_id].tid[tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + turn_off: + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&priv->sta_lock); + spin_lock(&priv->lock); + + /* + * the only reason this call can fail is queue number out of range, + * which can happen if uCode is reloaded and all the station + * information are lost. if it is outside the range, there is no need + * to deactivate the uCode queue, just return "success" to allow + * mac80211 to clean up it own data. 
+ */ + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id); + spin_unlock_irqrestore(&priv->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id) +{ + struct iwl_queue *q = &priv->txq[txq_id].q; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; + struct iwl_rxon_context *ctx; + + ctx = &priv->contexts[priv->stations[sta_id].ctxid]; + + lockdep_assert_held(&priv->sta_lock); + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if ((txq_id == tid_data->agg.txq_id) && + (q->read_ptr == q->write_ptr)) { + u16 ssn = SEQ_TO_SN(tid_data->seq_number); + int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid); + IWL_DEBUG_HT(priv, + "HW queue empty: continue DELBA flow\n"); + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, + "HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + } + + return 0; +} + +static void iwl4965_non_agg_tx_status(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr1) +{ + struct ieee80211_sta *sta; + struct iwl_station_priv *sta_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(ctx->vif, addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(priv->hw, sta, false); + } + rcu_read_unlock(); +} + +static void +iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info, + bool is_agg) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; + + if (!is_agg) + iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1); + + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); +} + +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + int nfreed = 0; + struct ieee80211_hdr *hdr; + + if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->write_ptr, q->read_ptr); + return 0; + } + + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + iwl4965_tx_status(priv, tx_info, + txq_id >= IWL4965_FIRST_AMPDU_QUEUE); + + hdr = (struct ieee80211_hdr *)tx_info->skb->data; + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) + nfreed++; + tx_info->skb = NULL; + + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + return nfreed; +} + +/** + * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack + * + * Go through block-ack's bitmap of ACK'd frames, update driver's record of + * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
+ */ +static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp *ba_resp) + +{ + int i, sh, ack; + u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + int successes = 0; + struct ieee80211_tx_info *info; + u64 bitmap, sent_bitmap; + + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); + return -EINVAL; + } + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, + ba_resp->seq_ctl); + + /* Calculate shift to align block-ack bits with our Tx window bits */ + sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); + if (sh < 0) /* tbw something is wrong with indices */ + sh += 0x100; + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); + return -1; + } + + /* don't use 64-bit values for now */ + bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; + + /* check for success or failure according to the + * transmitted bitmap and block-ack bitmap */ + sent_bitmap = bitmap & agg->bitmap; + + /* For each frame attempted in aggregation, + * update driver's record of tx frame's status. */ + i = 0; + while (sent_bitmap) { + ack = sent_bitmap & 1ULL; + successes += ack; + IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", + ack ? "ACK" : "NACK", i, + (agg->start_idx + i) & 0xff, + agg->start_idx + i); + sent_bitmap >>= 1; + ++i; + } + + IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", + (unsigned long long)bitmap); + + info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = successes; + info->status.ampdu_len = agg->frame_count; + iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info); + + return 0; +} + +/** + * translate ucode response to mac80211 tx status control values + */ +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->control.rates[0]; + + info->antenna_sel_tx = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} + +/** + * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. 
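+ *
+ * The handler looks up the Tx queue from scd_flow, lets
+ * iwl4965_tx_status_reply_compressed_ba() update per-frame status, and then
+ * reclaims every TFD in front of the reported scd_ssn.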
+ */ +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + int index; + int sta_id; + int tid; + unsigned long flags; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= priv->hw_params.max_txq_num) { + IWL_ERR(priv, + "BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + txq = &priv->txq[scd_flow]; + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &priv->stations[sta_id].tid[tid].agg; + if (unlikely(agg->txq_id != scd_flow)) { + /* + * FIXME: this is a uCode bug which need to be addressed, + * log the information and return for now! + * since it is possible happen very often and in order + * not to fill the syslog, don't enable the logging by default + */ + IWL_DEBUG_TX_REPLY(priv, + "BA scd_flow %d does not match txq_id %d\n", + scd_flow, agg->txq_id); + return; + } + + /* Find index just before block-ack window */ + index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + spin_lock_irqsave(&priv->sta_lock, flags); + + IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " + "sta_id = %d\n", + agg->wait_for_ba, + (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx," + "scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->seq_ctl, + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n", + agg->start_idx, + (unsigned long long)agg->bitmap); + + /* Update driver's record of ACK vs. not for each frame in window */ + iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). 
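+ * iwl4965_tx_queue_reclaim() returns how many QoS-data frames it freed; that
+ * count is handed to iwl4965_free_tfds_in_queue() for per-TID accounting.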
*/ + if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { + /* calculate mac80211 ampdu sw queue to wake */ + int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); + + if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) && + priv->mac80211_registered && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); + + iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow); + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(FIFO_UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); + TX_STATUS_FAIL(PASSIVE_NO_RX); + TX_STATUS_FAIL(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c new file mode 100644 index 000000000000..001d148feb94 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c @@ -0,0 +1,166 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-4965-calib.h" + +#define IWL_AC_UNSET -1 + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. 
If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int +iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int ret = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL4965_RTC_INST_LOWER_BOUND); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + ret = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return ret; +} + +/** + * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image, + u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL4965_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * iwl4965_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int iwl4965_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
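+ * (iwl4965_verify_inst_full() reports up to 20 mismatching words before
+ * giving up.)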
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_full(priv, image, len); + + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c new file mode 100644 index 000000000000..080444c89022 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965.c @@ -0,0 +1,2188 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-calib.h" +#include "iwl-sta.h" +#include "iwl-4965-led.h" +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" + +static int iwl4965_send_tx_power(struct iwl_priv *priv); +static int iwl4965_hw_get_temperature(struct iwl_priv *priv); + +/* Highest firmware API version supported */ +#define IWL4965_UCODE_API_MAX 2 + +/* Lowest firmware API version supported */ +#define IWL4965_UCODE_API_MIN 2 + +#define IWL4965_FW_PRE "iwlwifi-4965-" +#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" +#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) + +/* check contents of special bootstrap uCode SRAM */ +static int iwl4965_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image++) { + val = iwl_legacy_read_prph(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl4965_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State 
Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl4965_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int i; + u32 done; + u32 reg_offset; + int ret; + + IWL_DEBUG_INFO(priv, "Begin load bsm\n"); + + priv->ucode_type = UCODE_RT; + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL49_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... host DRAM physical address bits 35:4 for 4965. + * NOTE: iwl_init_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
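+ * The DRAM addresses are written shifted right by 4 bits because the
+ * BSM_DRAM_*_PTR registers hold physical address bits 35:4.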
+ */ + pinst = priv->ucode_init.p_addr >> 4; + pdata = priv->ucode_init_data.p_addr >> 4; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); + + ret = iwl4965_verify_bsm(priv); + if (ret) + return ret; + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, + BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i); + else { + IWL_ERR(priv, "BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_legacy_write_prph(priv, + BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + + + return 0; +} + +/** + * iwl4965_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int ret = 0; + + /* bits 35:4 for 4965 */ + pinst = priv->ucode_code.p_addr >> 4; + pdata = priv->ucode_data_backup.p_addr >> 4; + + /* Tell bootstrap uCode where to find image to load */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return ret; +} + +/** + * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl4965_init_alive_start(struct iwl_priv *priv) +{ + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 
+ * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl4965_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + priv->temperature = iwl4965_hw_get_temperature(priv); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl4965_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + +restart: + queue_work(priv->workqueue, &priv->restart); +} + +static bool iw4965_is_ht40_channel(__le32 rxon_flags) +{ + int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + return ((chan_mod == CHANNEL_MODE_PURE_40) || + (chan_mod == CHANNEL_MODE_MIXED)); +} + +static void iwl4965_nic_config(struct iwl_priv *priv) +{ + unsigned long flags; + u16 radio_cfg; + + spin_lock_irqsave(&priv->lock, flags); + + radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + priv->calib_info = (struct iwl_eeprom_calib_info *) + iwl_legacy_eeprom_query_addr(priv, + EEPROM_4965_CALIB_TXPOWER_OFFSET); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * ... once chain noise is calibrated the first time, it's good forever. 
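+ * The reset zeroes the accumulated noise/signal data, sends a
+ * REPLY_PHY_CALIBRATION_CMD with all differential gains set to 0, and moves
+ * the algorithm to the IWL_CHAIN_NOISE_ACCUMULATE state.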
*/ +static void iwl4965_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = &(priv->chain_noise_data); + + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && + iwl_legacy_is_any_associated(priv)) { + struct iwl_calib_diff_gain_cmd cmd; + + /* clear data for chain noise calibration algorithm */ + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd)) + IWL_ERR(priv, + "Could not send REPLY_PHY_CALIBRATION_CMD\n"); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); + } +} + +static struct iwl_sensitivity_ranges iwl4965_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + + .auto_corr_min_ofdm = 85, + .auto_corr_min_ofdm_mrc = 170, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 220, + + .auto_corr_max_ofdm = 120, + .auto_corr_max_ofdm_mrc = 210, + .auto_corr_max_ofdm_x1 = 140, + .auto_corr_max_ofdm_mrc_x1 = 270, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 200, + .auto_corr_min_cck_mrc = 200, + .auto_corr_max_cck_mrc = 400, + + .nrg_th_cck = 100, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static void iwl4965_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Kelvin */ + priv->hw_params.ct_kill_threshold = + CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY); +} + +/** + * iwl4965_hw_set_hw_params + * + * Called when initializing driver + */ +static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES) + priv->cfg->base_params->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; + priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; + priv->hw_params.scd_bc_tbls_size = + priv->cfg->base_params->num_of_queues * + sizeof(struct iwl4965_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWL4965_STATION_COUNT; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID; + priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; + priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ); + + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + iwl4965_set_ct_threshold(priv); + + priv->hw_params.sens = &iwl4965_sensitivity; + priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS; + + return 0; +} + +static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +/** + * 
iwl4965_get_voltage_compensation - Power supply voltage comp for txpower + * + * Determines power supply voltage compensation for txpower calculations. + * Returns number of 1/2-dB steps to subtract from gain table index, + * to compensate for difference between power supply voltage during + * factory measurements, vs. current power supply voltage. + * + * Voltage indication is higher for lower voltage. + * Lower voltage requires more gain (lower gain table index). + */ +static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, + s32 current_voltage) +{ + s32 comp = 0; + + if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || + (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) + return 0; + + iwl4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static s32 iwl4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + return -1; +} + +static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (priv->calib_info->band_info[b].ch_from == 0) + continue; + + if ((channel >= priv->calib_info->band_info[b].ch_from) + && (channel <= priv->calib_info->band_info[b].ch_to)) + break; + } + + return b; +} + +static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +/** + * iwl4965_interpolate_chan - Interpolate factory measurements for one channel + * + * Interpolates factory measurements from the two sample channels within a + * sub-band, to apply to channel of interest. Interpolation is proportional to + * differences in channel frequencies, which is proportional to differences + * in channel number. + */ +static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, + struct iwl_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct iwl_eeprom_calib_measure *m1; + const struct iwl_eeprom_calib_measure *m2; + struct iwl_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = iwl4965_get_sub_band(priv, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IWL_ERR(priv, "Tx Power can not find channel %d\n", channel); + return -1; + } + + ch_i1 = priv->calib_info->band_info[s].ch1.ch_num; + ch_i2 = priv->calib_info->band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n", + channel, s, ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(priv->calib_info->band_info[s].ch1. + measurements[c][m]); + m2 = &(priv->calib_info->band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->actual_pow, + ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) iwl4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, + m1->actual_pow, m2->actual_pow, omeas->actual_pow); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, + m1->gain_idx, m2->gain_idx, omeas->gain_idx); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, + m1->pa_det, m2->pa_det, omeas->pa_det); + IWL_DEBUG_TXPOWER(priv, + "chain %d meas %d T1=%d T2=%d T=%d\n", c, m, + m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct iwl4965_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + {9, 2}, /* group 0 5.2, ch 34-43 */ + {4, 1}, /* group 1 5.2, ch 44-70 */ + {4, 1}, /* group 2 5.2, ch 71-124 */ + {4, 1}, /* group 3 5.2, ch 125-200 */ + {3, 1} /* group 4 2.4, ch all */ +}; + +static s32 get_min_power_index(s32 rate_power_index, u32 band) +{ + if (!band) { + if ((rate_power_index & 7) <= 4) + return MIN_TX_GAIN_INDEX_52GHZ_EXT; + } + return MIN_TX_GAIN_INDEX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain index table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + 
{110, 0x06}, + {104, 0x06}, + {98, 0x06}, + {110, 0x05}, + {104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain index table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, + u8 is_ht40, u8 ctrl_chan_high, + struct iwl4965_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_eeprom_calib_ch_info ch_eeprom_info; + const struct iwl_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_index[2]; + s32 factory_actual_pwr[2]; + s32 power_index; + + /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units + * are used for indexing into txpower table) */ + user_target_power = 2 * priv->tx_power_user_lmt; + + /* Get current (RXON) channel, band, width */ + IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, + is_ht40); + + ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel); + + if (!iwl_legacy_is_channel_valid(ch_info)) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. 
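+ * There are five groups: four 5.2 GHz sub-bands plus one group covering all
+ * 2.4 GHz channels (see tx_power_cmp_tble above).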
*/ + txatten_grp = iwl4965_get_tx_atten_grp(channel); + if (txatten_grp < 0) { + IWL_ERR(priv, "Can't find txatten group for channel %d.\n", + channel); + return -EINVAL; + } + + IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n", + channel, txatten_grp); + + if (is_ht40) { + if (ctrl_chan_high) + channel -= 2; + else + channel += 2; + } + + /* hardware txpower limits ... + * saturation (clipping distortion) txpowers are in half-dBm */ + if (band) + saturation_power = priv->calib_info->saturation_power24; + else + saturation_power = priv->calib_info->saturation_power52; + + if (saturation_power < IWL_TX_POWER_SATURATION_MIN || + saturation_power > IWL_TX_POWER_SATURATION_MAX) { + if (band) + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; + else + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; + } + + /* regulatory txpower limits ... reg_limit values are in half-dBm, + * max_power_avg values are in dBm, convert * 2 */ + if (is_ht40) + reg_limit = ch_info->ht40_max_power_avg * 2; + else + reg_limit = ch_info->max_power_avg * 2; + + if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || + (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { + if (band) + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; + else + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; + } + + /* Interpolate txpower calibration values for this channel, + * based on factory calibration tests on spaced channels. */ + iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); + + /* calculate tx gain adjustment based on power supply voltage */ + voltage = le16_to_cpu(priv->calib_info->voltage); + init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); + voltage_compensation = + iwl4965_get_voltage_compensation(voltage, init_voltage); + + IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n", + init_voltage, + voltage, voltage_compensation); + + /* get current temperature (Celsius) */ + current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); + current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX); + current_temp = KELVIN_TO_CELSIUS(current_temp); + + /* select thermal txpower adjustment params, based on channel group + * (same frequency group used for mimo txatten adjustment) */ + degrees_per_05db_num = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; + degrees_per_05db_denom = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; + + /* get per-chain txpower values from factory measurements */ + for (c = 0; c < 2; c++) { + measurement = &ch_eeprom_info.measurements[c][1]; + + /* txgain adjustment (in half-dB steps) based on difference + * between factory and current temperature */ + factory_temp = measurement->temperature; + iwl4965_math_div_round((current_temp - factory_temp) * + degrees_per_05db_denom, + degrees_per_05db_num, + &temperature_comp[c]); + + factory_gain_index[c] = measurement->gain_idx; + factory_actual_pwr[c] = measurement->actual_pow; + + IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c); + IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, " + "curr tmp %d, comp %d steps\n", + factory_temp, current_temp, + temperature_comp[c]); + + IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n", + factory_gain_index[c], + factory_actual_pwr[c]); + } + + /* for each of 33 bit-rates (including 1 for CCK) */ + for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { + u8 is_mimo_rate; + union iwl4965_tx_power_dual_stream tx_power; + + /* for mimo, reduce each chain's txpower by half + * (3dB, 6 steps), so total output power is regulatory + * compliant. 
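+ * Below, this is done by subtracting IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION
+ * (the 6 half-dB steps) from the regulatory limit whenever bit 3 of the rate
+ * index is set.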
*/ + if (i & 0x8) { + current_regulatory = reg_limit - + IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n", + i, saturation_power - back_off_table[i], + current_regulatory, user_target_power, + target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32)le32_to_cpu(priv->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate index; higher index means lower txpower */ + power_index = (u8) (factory_gain_index[c] - + (target_power - + factory_actual_pwr[c]) - + temperature_comp[c] - + voltage_compensation + + atten_value); + +/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n", + power_index); */ + + if (power_index < get_min_power_index(i, band)) + power_index = get_min_power_index(i, band); + + /* adjust 5 GHz index to support negative indexes */ + if (!band) + power_index += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TABLE_CCK_ENTRY) + power_index += + IWL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_index > 107) { + IWL_WARN(priv, "txpower index %d > 107\n", + power_index); + power_index = 107; + } + if (power_index < 0) { + IWL_WARN(priv, "txpower index %d < 0\n", + power_index); + power_index = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_index].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_index].dsp; + + IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d " + "gain 0x%02x dsp %d\n", + c, atten_value, power_index, + tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + } /* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + } /* for each rate */ + + return 0; +} + +/** + * iwl4965_send_tx_power - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (ht40, high) + * The power limit is taken from priv->tx_power_user_lmt. 
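+ * (The limit is in dBm and is doubled into half-dBm units inside
+ * iwl4965_fill_txpower_tbl(), e.g. a 14 dBm user limit becomes 28 half-dB
+ * steps.)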
+ */ +static int iwl4965_send_tx_power(struct iwl_priv *priv) +{ + struct iwl4965_txpowertable_cmd cmd = { 0 }; + int ret; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), + "TX Power requested while scanning!\n")) + return -EAGAIN; + + band = priv->band == IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); + + if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = ctx->active.channel; + + ret = iwl4965_fill_txpower_tbl(priv, band, + le16_to_cpu(ctx->active.channel), + is_ht40, ctrl_chan_high, &cmd.tx_power); + if (ret) + goto out; + + ret = iwl_legacy_send_cmd_pdu(priv, + REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + +out: + return ret; +} + +static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int ret = 0; + struct iwl4965_rxon_assoc_cmd rxon_assoc; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = ctx->staging.flags; + rxon_assoc.filter_flags = ctx->staging.filter_flags; + rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + ctx->staging.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + ctx->staging.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; + + ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, + sizeof(rxon_assoc), &rxon_assoc, NULL); + if (ret) + return ret; + + return ret; +} + +static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active; + int ret; + bool new_assoc = + !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); + + if (!iwl_legacy_is_alive(priv)) + return -EBUSY; + + if (!ctx->is_active) + return 0; + + /* always get timestamp with Rx frame */ + ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + + ret = iwl_legacy_check_rxon_cmd(priv, ctx); + if (ret) { + IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* + * receive commit_rxon request + * abort any previous channel switch if still in process + */ + if (priv->switch_rxon.switch_in_progress && + (priv->switch_rxon.channel != ctx->staging.channel)) { + IWL_DEBUG_11H(priv, "abort channel switch on %d\n", + le16_to_cpu(priv->switch_rxon.channel)); + iwl_legacy_chswitch_done(priv, false); + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. 
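+ * iwl_legacy_full_rxon_required() makes that decision below; the lightweight
+ * path only refreshes the association-related fields compared in
+ * iwl4965_send_rxon_assoc() above.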
*/ + if (!iwl_legacy_full_rxon_required(priv, ctx)) { + ret = iwl_legacy_send_rxon_assoc(priv, ctx); + if (ret) { + IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); + return ret; + } + + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + iwl_legacy_print_rx_config_cmd(priv, ctx); + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) { + IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), + active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (ret) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); + return ret; + } + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + + IWL_DEBUG_INFO(priv, "Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %pM\n", + (new_assoc ? "" : "out"), + le16_to_cpu(ctx->staging.channel), + ctx->staging.bssid_addr); + + iwl_legacy_set_rxon_hwcrypto(priv, ctx, + !priv->cfg->mod_params->sw_crypto); + + /* Apply the new configuration + * RXON unassoc clears the station table in uCode so restoration of + * stations is needed after it (the RXON command) completes + */ + if (!new_assoc) { + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); + if (ret) { + IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); + return ret; + } + } + if (new_assoc) { + priv->start_calib = 0; + /* Apply the new configuration + * RXON assoc doesn't clear the station table in uCode, + */ + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); + if (ret) { + IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); + return ret; + } + memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); + } + iwl_legacy_print_rx_config_cmd(priv, ctx); + + iwl4965_init_sensitivity(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + ret = iwl_legacy_set_tx_power(priv, priv->tx_power_user_lmt, true); + if (ret) { + IWL_ERR(priv, "Error sending TX power (%d)\n", ret); + return ret; + } + + return 0; +} + +static int iwl4965_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int rc; + u8 band = 0; + bool is_ht40 = false; + u8 ctrl_chan_high = 0; + struct iwl4965_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 
switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + band = priv->band == IEEE80211_BAND_2GHZ; + + is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); + + if (is_ht40 && + (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + ch = ch_switch->channel->hw_value; + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_legacy_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_legacy_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch); + if (ch_info) + cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + ctx->active.channel, ch); + return -EFAULT; + } + + rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40, + ctrl_chan_high, &cmd.tx_power); + if (rc) { + IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc); + return rc; + } + + priv->switch_rxon.channel = cmd.channel; + priv->switch_rxon.switch_in_progress = true; + + return iwl_legacy_send_cmd_pdu(priv, + REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); +} + +/** + * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt) +{ + struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; + int txq_id = txq->q.id; + int write_ptr = txq->q.write_ptr; + int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + + WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); + + bc_ent = cpu_to_le16(len & 0xFFF); + /* Set up byte count within first 256 entries */ + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + /* If within first 64 entries, duplicate at end */ + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id]. 
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; +} + +/** + * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) + * @statistics: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the statistics + */ +static int iwl4965_hw_get_temperature(struct iwl_priv *priv) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(STATUS_TEMPERATURE, &priv->status) && + (priv->_4965.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { + IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); + } else { + IWL_DEBUG_TEMP(priv, "Running temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits, so sign extend out to 32. + * + * NOTE If we haven't received a statistics notification yet + * with an updated temperature, use R4 provided to us in the + * "initialize" ALIVE response. + */ + if (!test_bit(STATUS_TEMPERATURE, &priv->status)) + vt = sign_extend32(R4, 23); + else + vt = sign_extend32(le32_to_cpu(priv->_4965.statistics. + general.common.temperature), 23); + + IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); + + if (R3 == R1) { + IWL_ERR(priv, "Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; + + IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n", + temperature, KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IWL_TEMPERATURE_THRESHOLD 3 + +/** + * iwl4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature changed has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace priv->last_temperature once calibration + * executed. 
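+ * A temperature difference smaller than IWL_TEMPERATURE_THRESHOLD (3 degrees)
+ * does not trigger a new txpower calibration.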
+ */ +static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + if (!test_bit(STATUS_STATISTICS, &priv->status)) { + IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n"); + return 0; + } + + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER(priv, "Temperature unchanged\n"); + else + IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff); + + if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { + IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n"); + + return 1; +} + +static void iwl4965_temperature_calib(struct iwl_priv *priv) +{ + s32 temp; + + temp = iwl4965_hw_get_temperature(priv); + if (temp < 0) + return; + + if (priv->temperature != temp) { + if (priv->temperature) + IWL_DEBUG_TEMP(priv, "Temperature changed " + "from %dC to %dC\n", + KELVIN_TO_CELSIUS(priv->temperature), + KELVIN_TO_CELSIUS(temp)); + else + IWL_DEBUG_TEMP(priv, "Temperature " + "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + priv->temperature = temp; + set_bit(STATUS_TEMPERATURE, &priv->status); + + if (!priv->disable_tx_power_cal && + unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + iwl4965_is_temp_calib_needed(priv)) + queue_work(priv->workqueue, &priv->txpower_work); +} + +static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) +{ + switch (cmd_id) { + case REPLY_RXON: + return (u16) sizeof(struct iwl4965_rxon_cmd); + default: + return len; + } +} + +static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) +{ + struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; + addsta->mode = cmd->mode; + memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); + memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo)); + addsta->station_flags = cmd->station_flags; + addsta->station_flags_msk = cmd->station_flags_msk; + addsta->tid_disable_tx = cmd->tid_disable_tx; + addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; + addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; + addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; + addsta->sleep_tx_count = cmd->sleep_tx_count; + addsta->reserved1 = cpu_to_le16(0); + addsta->reserved2 = cpu_to_le16(0); + + return (u16)sizeof(struct iwl4965_addsta_cmd); +} + +static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp) +{ + return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; +} + +/** + * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue + */ +static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl4965_tx_resp *tx_resp, + int txq_id, u16 start_idx) +{ + u16 status; + struct agg_tx_status *frame_status = tx_resp->u.agg_status; + struct ieee80211_tx_info *info = NULL; + struct ieee80211_hdr *hdr = NULL; + u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + int i, sh, idx; + u16 seq; + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = rate_n_flags; + agg->bitmap = 0; + + /* num frames attempted by Tx command */ + if (agg->frame_count == 1) { + /* Only one frame was attempted; no block-ack will arrive */ + status = 
le16_to_cpu(frame_status[0].status); + idx = start_idx; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", + agg->frame_count, agg->start_idx, idx); + + info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info); + + IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", + status & 0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags); + + agg->wait_for_ba = 0; + } else { + /* Two or more frames were attempted; expect block-ack */ + u64 bitmap = 0; + int start = agg->start_idx; + + /* Construct bit-map of pending frames within Tx window */ + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le16_to_cpu(frame_status[i].status); + seq = le16_to_cpu(frame_status[i].sequence); + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx); + if (!hdr) { + IWL_ERR(priv, + "BUG_ON idx doesn't point to valid skb" + " idx=%d, txq_id=%d\n", idx, txq_id); + return -1; + } + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERR(priv, + "BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= 1ULL << sh; + IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n", + start, (unsigned long long)bitmap); + } + + agg->bitmap = bitmap; + agg->start_idx = start; + IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", + agg->frame_count, agg->start_idx, + (unsigned long long)agg->bitmap); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} + +static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int start = 0; + int ret = IWL_INVALID_STATION; + unsigned long flags; + + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC)) + start = IWL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = start; i < priv->hw_params.max_stations; i++) + if (priv->stations[i].used && + (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n", + addr, priv->num_stations); + + out: + /* + * It may be possible that more commands interacting with stations + * arrive before we completed processing the adding of + * station + */ + if (ret != IWL_INVALID_STATION && + (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) || + ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) && + (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) { + IWL_ERR(priv, "Requested station info for sta %d before ready.\n", + ret); + ret = IWL_INVALID_STATION; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +static 
int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + if (priv->iw_mode == NL80211_IFTYPE_STATION) { + return IWL_AP_ID; + } else { + u8 *da = ieee80211_get_DA(hdr); + return iwl4965_find_station(priv, da); + } +} + +/** + * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response + */ +static void iwl4965_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *info; + struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->u.status); + int uninitialized_var(tid); + int sta_id; + int freed; + u8 *qc = NULL; + unsigned long flags; + + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.write_ptr, + txq->q.read_ptr); + return; + } + + txq->time_stamp = jiffies; + info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); + memset(&info->status, 0, sizeof(info->status)); + + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index); + if (ieee80211_is_data_qos(hdr->frame_control)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + sta_id = iwl4965_get_ra_sta_id(priv, hdr); + if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { + IWL_ERR(priv, "Station not known\n"); + return; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (txq->sched_retry) { + const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); + struct iwl_ht_agg *agg = NULL; + WARN_ON(!qc); + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); + + /* check if BAR is needed */ + if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status)) + info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + + if (txq->q.read_ptr != (scd_ssn & 0xff)) { + index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff, + txq->q.n_bd); + IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " + "%d index %d\n", scd_ssn , index); + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); + if (qc) + iwl4965_free_tfds_in_queue(priv, sta_id, + tid, freed); + + if (priv->mac80211_registered && + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) + && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); + } + } else { + info->status.rates[0].count = tx_resp->failure_frame + 1; + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, + le32_to_cpu(tx_resp->rate_n_flags), + info); + + IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " + "rate_n_flags 0x%x retries %d\n", + txq_id, + iwl4965_get_tx_fail_reason(status), status, + le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); + if (qc && likely(sta_id != IWL_INVALID_STATION)) + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); + else if (sta_id == IWL_INVALID_STATION) + IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); + + if (priv->mac80211_registered && + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)) + iwl_legacy_wake_queue(priv, txq); + } + if (qc && likely(sta_id != IWL_INVALID_STATION)) + iwl4965_txq_check_empty(priv, sta_id, tid, txq_id); + + 
iwl4965_check_abort_status(priv, tx_resp->frame_count, status); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; + u8 rate __maybe_unused = + iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " + "tsf:0x%.8x%.8x rate:%d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +/* Set up 4965-specific Rx frame reply handlers */ +static void iwl4965_rx_handler_setup(struct iwl_priv *priv) +{ + /* Legacy Rx frames */ + priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; + /* Tx response */ + priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; +} + +static struct iwl_hcmd_ops iwl4965_hcmd = { + .rxon_assoc = iwl4965_send_rxon_assoc, + .commit_rxon = iwl4965_commit_rxon, + .set_rxon_chain = iwl4965_set_rxon_chain, +}; + +static void iwl4965_post_scan(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwl_legacy_commit_rxon(priv, ctx); +} + +static void iwl4965_post_associate(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + struct ieee80211_conf *conf = NULL; + int ret = 0; + + if (!vif || !priv->is_open) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_legacy_scan_cancel_timeout(priv, 200); + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + ret = iwl_legacy_send_rxon_timing(priv, ctx); + if (ret) + IWL_WARN(priv, "RXON timing - " + "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + vif->bss_conf.aid, vif->bss_conf.beacon_int); + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + iwl_legacy_commit_rxon(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + vif->bss_conf.aid, ctx->active.bssid_addr); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_ADHOC: + iwl4965_send_beacon_cmd(priv); + break; + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, vif->type); + break; + } + + /* the chain noise calibration will enabled PM upon completion + * If chain noise has already been run, then we need 
to enable + * power management here */ + if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) + iwl_legacy_power_update_mode(priv, false); + + /* Enable Rx differential gain and sensitivity calibrations */ + iwl4965_chain_noise_reset(priv); + priv->start_calib = 1; +} + +static void iwl4965_config_ap(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + int ret = 0; + + lockdep_assert_held(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!iwl_legacy_is_associated_ctx(ctx)) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + /* RXON Timing */ + ret = iwl_legacy_send_rxon_timing(priv, ctx); + if (ret) + IWL_WARN(priv, "RXON timing failed - " + "Attempting to continue.\n"); + + /* AP has all antennas */ + priv->chain_noise_data.active_chains = + priv->hw_params.valid_rx_ant; + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* need to send beacon cmd before committing assoc RXON! */ + iwl4965_send_beacon_cmd(priv); + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + } + iwl4965_send_beacon_cmd(priv); +} + +static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { + .get_hcmd_size = iwl4965_get_hcmd_size, + .build_addsta_hcmd = iwl4965_build_addsta_hcmd, + .request_scan = iwl4965_request_scan, + .post_scan = iwl4965_post_scan, +}; + +static struct iwl_lib_ops iwl4965_lib = { + .set_hw_params = iwl4965_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, + .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl4965_hw_txq_free_tfd, + .txq_init = iwl4965_hw_tx_queue_init, + .rx_handler_setup = iwl4965_rx_handler_setup, + .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, + .init_alive_start = iwl4965_init_alive_start, + .load_ucode = iwl4965_load_bsm, + .dump_nic_event_log = iwl4965_dump_nic_event_log, + .dump_nic_error_log = iwl4965_dump_nic_error_log, + .dump_fh = iwl4965_dump_fh, + .set_channel_switch = iwl4965_hw_channel_switch, + .apm_ops = { + .init = iwl_legacy_apm_init, + .config = iwl4965_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REGULATORY_BAND_1_CHANNELS, + EEPROM_REGULATORY_BAND_2_CHANNELS, + EEPROM_REGULATORY_BAND_3_CHANNELS, + EEPROM_REGULATORY_BAND_4_CHANNELS, + EEPROM_REGULATORY_BAND_5_CHANNELS, + EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, + EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS + }, + .acquire_semaphore = iwl4965_eeprom_acquire_semaphore, + .release_semaphore = iwl4965_eeprom_release_semaphore, + }, + .send_tx_power = iwl4965_send_tx_power, + .update_chain_flags = iwl4965_update_chain_flags, + .temp_ops = { + .temperature = iwl4965_temperature_calib, + }, + .debugfs_ops = { + .rx_stats_read = iwl4965_ucode_rx_stats_read, + .tx_stats_read = 
iwl4965_ucode_tx_stats_read, + .general_stats_read = iwl4965_ucode_general_stats_read, + }, + .check_plcp_health = iwl4965_good_plcp_health, +}; + +static const struct iwl_legacy_ops iwl4965_legacy_ops = { + .post_associate = iwl4965_post_associate, + .config_ap = iwl4965_config_ap, + .manage_ibss_station = iwl4965_manage_ibss_station, + .update_bcast_stations = iwl4965_update_bcast_stations, +}; + +struct ieee80211_ops iwl4965_hw_ops = { + .tx = iwl4965_mac_tx, + .start = iwl4965_mac_start, + .stop = iwl4965_mac_stop, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, + .config = iwl_legacy_mac_config, + .configure_filter = iwl4965_configure_filter, + .set_key = iwl4965_mac_set_key, + .update_tkip_key = iwl4965_mac_update_tkip_key, + .conf_tx = iwl_legacy_mac_conf_tx, + .reset_tsf = iwl_legacy_mac_reset_tsf, + .bss_info_changed = iwl_legacy_mac_bss_info_changed, + .ampdu_action = iwl4965_mac_ampdu_action, + .hw_scan = iwl_legacy_mac_hw_scan, + .sta_add = iwl4965_mac_sta_add, + .sta_remove = iwl_legacy_mac_sta_remove, + .channel_switch = iwl4965_mac_channel_switch, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, +}; + +static const struct iwl_ops iwl4965_ops = { + .lib = &iwl4965_lib, + .hcmd = &iwl4965_hcmd, + .utils = &iwl4965_hcmd_utils, + .led = &iwl4965_led_ops, + .legacy = &iwl4965_legacy_ops, + .ieee80211_ops = &iwl4965_hw_ops, +}; + +static struct iwl_base_params iwl4965_base_params = { + .eeprom_size = IWL4965_EEPROM_IMG_SIZE, + .num_of_queues = IWL49_NUM_QUEUES, + .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = true, + .led_compensation = 61, + .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .temperature_kelvin = true, + .max_event_log_size = 512, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, +}; + +struct iwl_cfg iwl4965_cfg = { + .name = "Intel(R) Wireless WiFi Link 4965AGN", + .fw_name_pre = IWL4965_FW_PRE, + .ucode_api_max = IWL4965_UCODE_API_MAX, + .ucode_api_min = IWL4965_UCODE_API_MIN, + .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, + .valid_tx_ant = ANT_AB, + .valid_rx_ant = ANT_ABC, + .eeprom_ver = EEPROM_4965_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, + .ops = &iwl4965_ops, + .mod_params = &iwl4965_mod_params, + .base_params = &iwl4965_base_params, + .led_mode = IWL_LED_BLINK, + /* + * Force use of chains B and C for scan RX on 5 GHz band + * because the device has off-channel reception on chain A. + */ + .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, +}; + +/* Module firmware */ +MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h new file mode 100644 index 000000000000..79e206770f71 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965.h @@ -0,0 +1,282 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_4965_h__ +#define __iwl_4965_h__ + +#include "iwl-dev.h" + +/* configuration for the _4965 devices */ +extern struct iwl_cfg iwl4965_cfg; + +extern struct iwl_mod_params iwl4965_mod_params; + +extern struct ieee80211_ops iwl4965_hw_ops; + +/* tx queue */ +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed); + +/* RXON */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/* uCode */ +int iwl4965_verify_ucode(struct iwl_priv *priv); + +/* lib */ +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status); + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_hw_nic_init(struct iwl_priv *priv); +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display); + +/* rx */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv); +void iwl4965_rx_replenish(struct iwl_priv *priv); +void iwl4965_rx_replenish_now(struct iwl_priv *priv); +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rxq_stop(struct iwl_priv *priv); +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_handle(struct iwl_priv *priv); + +/* tx */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad); +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info); +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id); +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv); +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv); +void iwl4965_txq_ctx_reset(struct iwl_priv *priv); +void iwl4965_txq_ctx_stop(struct iwl_priv *priv); +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask); + +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index); +/** + * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue + * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed + * @scd_retry: (1) Indicates queue will be used in aggregation mode + * + * NOTE: Acquire priv->lock before calling this function ! 
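+ *
+ * Illustrative call pattern (a sketch only, following the locking rule
+ * above and the iwl4965_set_wr_ptrs note preceding it; local names are
+ * placeholders):
+ *
+ *	spin_lock_irqsave(&priv->lock, flags);
+ *	iwl4965_set_wr_ptrs(priv, txq_id, ssn);
+ *	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+ *	spin_unlock_irqrestore(&priv->lock, flags);
+ *
+ * with scd_retry = 1 marking the queue for aggregation use.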
+ */ +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry); + +static inline u32 iwl4965_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool iwl4965_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) || + (status == TX_STATUS_DIRECT_DONE); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); + +/* rx */ +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +bool iwl4965_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl4965_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/* scan */ +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); + +/* station mgmt */ +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); + +/* hcmd */ +int iwl4965_send_beacon_cmd(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status); +#else +static inline const char * +iwl4965_get_tx_fail_reason(u32 status) { return ""; } +#endif + +/* station management */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_add_bssid_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r); +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_set_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, + int sta_id, int tid); +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn); +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid); +void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, + int sta_id, int cnt); +int iwl4965_update_bcast_stations(struct iwl_priv *priv); + +/* rate */ +static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx) +{ + return BIT(ant_idx) << RATE_MCS_ANT_POS; +} + +static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} + +static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags) +{ + return cpu_to_le32(flags|(u32)rate); +} + +/* eeprom */ +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv); +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv); +int iwl4965_eeprom_check_version(struct iwl_priv *priv); 
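+
+/*
+ * Illustrative use of the rate helpers in the block above (PLCP value taken
+ * from the rate_n_flags documentation in iwl-commands.h): legacy OFDM
+ * 6 Mbps has PLCP code 0xD, so
+ *
+ *	rate_n_flags = iwl4965_hw_set_rate_n_flags(0xD,
+ *					iwl4965_ant_idx_to_flags(1));
+ *
+ * builds a 6 Mbps entry sent on antenna B, and iwl4965_hw_get_rate()
+ * recovers the 0xD PLCP code from it again.
+ */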
+ +/* mac80211 handlers (for 4965) */ +int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int iwl4965_mac_start(struct ieee80211_hw *hw); +void iwl4965_mac_stop(struct ieee80211_hw *hw); +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); + +#endif /* __iwl_4965_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h new file mode 100644 index 000000000000..17a1d504348e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-commands.h @@ -0,0 +1,3405 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-commands.h) only for uCode API definitions. + * Please use iwl-xxxx-hw.h for hardware-related definitions. + * Please use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_legacy_commands_h__ +#define __iwl_legacy_commands_h__ + +struct iwl_priv; + +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + + +/* Tx rates */ +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 +#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) + +enum { + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + /* RXON and QOS commands */ + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, + + /* Multi-Station support */ + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, + + /* Security */ + REPLY_WEPKEY = 0x20, + + /* RX, TX, LEDs */ + REPLY_3945_RX = 0x1b, /* 3945 only */ + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ + + /* 802.11h related */ + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, + + /* Power Management */ + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + + /* Scan commands and notifications */ + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* IBSS/AP commands */ + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, + + /* Miscellaneous commands */ + REPLY_TX_PWR_TABLE_CMD = 0x97, + + /* Bluetooth device coexistence config command */ + REPLY_BT_CONFIG = 0x9b, + + /* Statistics */ + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + CARD_STATE_NOTIFICATION = 0xa1, + + /* Missed beacons notification */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 0xa8, + REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, + + REPLY_MAX = 0xff +}; + +/****************************************************************************** + * (0) + * Commonly used structures and definitions: + * Command header, rate_n_flags, txpower + * + 
*****************************************************************************/ + +/* iwl_cmd_header flags value */ +#define IWL_CMD_FAILED_MSK 0x40 + +#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) +#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) +#define SEQ_RX_FRAME cpu_to_le16(0x8000) + +/** + * struct iwl_cmd_header + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ + /* + * The driver sets up the sequence number to values of its choosing. + * uCode does not use this value, but passes it back to the driver + * when sending the response to each driver-originated command, so + * the driver can match the response to the command. Since the values + * don't get used by uCode, the driver may set up an arbitrary format. + * + * There is one exception: uCode sets bit 15 when it originates + * the response/notification, i.e. when the response/notification + * is not a direct response to a command sent by the driver. For + * example, uCode issues REPLY_3945_RX when it sends a received frame + * to the driver; it is not a direct response to any driver command. + * + * The Linux driver uses the following format: + * + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13 reserved + * 14 huge - driver sets this to indicate command is in the + * 'huge' storage at the end of the command buffers + * 15 unsolicited RX or uCode-originated notification + */ + __le16 sequence; + + /* command or response/notification data follows immediately */ + u8 data[0]; +} __packed; + + +/** + * struct iwl3945_tx_power + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. + * + * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 
+ */ +struct iwl3945_tx_power { + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ +} __packed; + +/** + * struct iwl3945_power_per_rate + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl3945_power_per_rate { + u8 rate; /* plcp */ + struct iwl3945_tx_power tpc; + u8 reserved; +} __packed; + +/** + * iwl4965 rate_n_flags bit fields + * + * rate_n_flags format is used in following iwl4965 commands: + * REPLY_RX (response only) + * REPLY_RX_MPDU (response only) + * REPLY_TX (both command and response) + * REPLY_TX_LINK_QUALITY_CMD + * + * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): + * 2-0: 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * + * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * + * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_SPATIAL_POS 3 +#define RATE_MCS_SPATIAL_MSK 0x18 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 + +/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */ +#define RATE_MCS_HT40_POS 11 +#define RATE_MCS_HT40_MSK 0x800 + +/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */ +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +/** + * rate_n_flags Tx antenna masks + * 4965 has 2 transmitters + * bit14:16 + */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK 0x04000 +#define RATE_MCS_ANT_B_MSK 0x08000 +#define RATE_MCS_ANT_C_MSK 0x10000 +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) +#define RATE_ANT_NUM 3 + +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 + +#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IWL_PWR_CCK_ENTRIES 2 + +/** + * union iwl4965_tx_power_dual_stream + * + * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Use __le32 version (struct tx_power_dual_stream) when building command. + * + * Driver provides radio gain and DSP attenuation settings to device in pairs, + * one value for each transmitter chain. The first value is for transmitter A, + * second for transmitter B. + * + * For SISO bit rates, both values in a pair should be identical. + * For MIMO rates, one value may be different from the other, + * in order to balance the Tx output between the two transmitters. + * + * See more details in doc for TXPOWER in iwl-4965-hw.h. 
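+ *
+ * Sketch of filling one entry (illustrative only; gain/atten/tbl are
+ * placeholders): for a SISO rate both halves of each pair get the same
+ * value,
+ *
+ *	union iwl4965_tx_power_dual_stream pwr;
+ *	pwr.s.radio_tx_gain[0] = pwr.s.radio_tx_gain[1] = gain;
+ *	pwr.s.dsp_predis_atten[0] = pwr.s.dsp_predis_atten[1] = atten;
+ *	tbl->power_tbl[i].dw = cpu_to_le32(pwr.dw);
+ *
+ * while a MIMO rate may use different values in index 0 (chain A) and
+ * index 1 (chain B) to balance the two transmitters.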
+ */ +union iwl4965_tx_power_dual_stream { + struct { + u8 radio_tx_gain[2]; + u8 dsp_predis_atten[2]; + } s; + u32 dw; +}; + +/** + * struct tx_power_dual_stream + * + * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * + * Same format as iwl_tx_power_dual_stream, but __le32 + */ +struct tx_power_dual_stream { + __le32 dw; +} __packed; + +/** + * struct iwl4965_tx_power_db + * + * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl4965_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +} __packed; + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK cpu_to_le32(0x1) +#define INITIALIZE_SUBTYPE (9) + +/* + * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "initialize alive" notification once the initialization + * uCode image has completed its work, and is ready to load the runtime image. + * This is the *first* "alive" notification that the driver will receive after + * rebooting uCode; the "initialize" alive is indicated by subtype field == 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * For 4965, this notification contains important calibration data for + * calculating txpower settings: + * + * 1) Power supply voltage indication. The voltage sensor outputs higher + * values for lower voltage, and vice verse. + * + * 2) Temperature measurement parameters, for each of two channel widths + * (20 MHz and 40 MHz) supported by the radios. Temperature sensing + * is done via one of the receiver chains, and channel width influences + * the results. + * + * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, + * for each of 5 frequency ranges. + */ +struct iwl_init_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* "9" for initialize alive */ + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; + + /* calibration values from "initialize" uCode */ + __le32 voltage; /* signed, higher value is lower voltage */ + __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */ + __le32 therm_r2[2]; /* signed */ + __le32 therm_r3[2]; /* signed */ + __le32 therm_r4[2]; /* signed */ + __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, + * 2 Tx chains */ +} __packed; + + +/** + * REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "alive" notification once the runtime image is ready + * to receive commands from the driver. This is the *second* "alive" + * notification that the driver will receive after rebooting uCode; + * this "alive" is indicated by subtype field != 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * This response includes two pointers to structures within the device's + * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging: + * + * 1) log_event_table_ptr indicates base of the event log. This traces + * a 256-entry history of uCode execution within a circular buffer. 
+ * Its header format is: + * + * __le32 log_size; log capacity (in number of entries) + * __le32 type; (1) timestamp with each entry, (0) no timestamp + * __le32 wraps; # times uCode has wrapped to top of circular buffer + * __le32 write_index; next circular buffer entry that uCode would fill + * + * The header is followed by the circular buffer of log entries. Entries + * with timestamps have the following format: + * + * __le32 event_id; range 0 - 1500 + * __le32 timestamp; low 32 bits of TSF (of network, if associated) + * __le32 data; event_id-specific data value + * + * Entries without timestamps contain only event_id and data. + * + * + * 2) error_event_table_ptr indicates base of the error log. This contains + * information about any uCode error that occurs. For 4965, the format + * of the error log is: + * + * __le32 valid; (nonzero) valid, (0) log is empty + * __le32 error_id; type of error + * __le32 pc; program counter + * __le32 blink1; branch link + * __le32 blink2; branch link + * __le32 ilink1; interrupt link + * __le32 ilink2; interrupt link + * __le32 data1; error-specific data + * __le32 data2; error-specific data + * __le32 line; source code line of error + * __le32 bcon_time; beacon timer + * __le32 tsf_low; network timestamp function timer + * __le32 tsf_hi; network timestamp function timer + * __le32 gp1; GP1 timer register + * __le32 gp2; GP2 timer register + * __le32 gp3; GP3 timer register + * __le32 ucode_ver; uCode version + * __le32 hw_ver; HW Silicon version + * __le32 brd_ver; HW board version + * __le32 log_pc; log program counter + * __le32 frame_ptr; frame pointer + * __le32 stack_ptr; stack pointer + * __le32 hcmd; last host command + * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag + * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag + * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag + * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag + * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt + * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT + * __le32 wait_event; wait event() caller address + * __le32 l2p_control; L2pControlField + * __le32 l2p_duration; L2pDurationField + * __le32 l2p_mhvalid; L2pMhValidBits + * __le32 l2p_addr_match; L2pAddrMatchStat + * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL) + * __le32 u_timestamp; indicate when the date and time of the compilation + * __le32 reserved; + * + * The Linux driver can print both logs to the system log when a uCode error + * occurs. 
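+ *
+ * Worked example of the event log layout described above: the header is
+ * four 32-bit words (16 bytes), and with type == 1 each entry is three
+ * words, so entry i starts at log_event_table_ptr + 16 + 12 * i in data
+ * SRAM; with type == 0 the stride drops to 8 bytes per entry.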
+ */ +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* not "9" for runtime alive */ + __le16 reserved2; + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 timestamp; + __le32 is_valid; +} __packed; + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_info; + __le64 timestamp; +} __packed; + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, +}; + + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) +#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) +#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + +/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) + + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23) +#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25) + +/* channel mode */ +enum { + CHANNEL_MODE_LEGACY = 0, + CHANNEL_MODE_PURE_40 = 1, + CHANNEL_MODE_MIXED = 2, + CHANNEL_MODE_RESERVED = 3, +}; +#define RXON_FLG_CHANNEL_MODE_LEGACY \ + cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_PURE_40 \ + cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_MIXED \ + cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS) + +/* CTS to self (if spec allows) flag */ +#define 
RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) + +/** + * REPLY_RXON = 0x10 (command, has simple generic response) + * + * RXON tunes the radio tuner to a service channel, and sets up a number + * of parameters that are used primarily for Rx, but also for Tx operations. + * + * NOTE: When tuning to a new channel, driver must set the + * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent + * info within the device, including the station tables, tx retry + * rate tables, and txpower tables. Driver must build a new station + * table and txpower table before transmitting anything on the RXON + * channel. + * + * NOTE: All RXONs wipe clean the internal txpower table. Driver must + * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), + * regardless of whether RXON_FILTER_ASSOC_MSK is set. + */ + +struct iwl3945_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 reserved4; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + __le16 reserved5; +} __packed; + +struct iwl4965_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; +} __packed; + +/* Create a common rxon cmd which will be typecast into the 3945 or 4965 + * specific rxon cmd, depending on where it is called from. 
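+ *
+ * For the 4965, for example, the iwl4965_get_hcmd_size() hook earlier in
+ * this patch reports sizeof(struct iwl4965_rxon_cmd) for REPLY_RXON, so
+ * only the 4965-sized prefix of this common layout is sent to the device
+ * and the trailing reserved bytes are dropped.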
+ */ +struct iwl_legacy_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 reserved4; + u8 reserved5; +} __packed; + + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl3945_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 reserved; +} __packed; + +struct iwl4965_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + __le16 rx_chain_select_flags; + __le16 reserved; +} __packed; + +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ +#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + __le64 timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + u8 dtim_period; + u8 delta_cp_bss_tbtts; +} __packed; + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +struct iwl3945_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __packed; + +struct iwl4965_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl4965_tx_power_db tx_power; +} __packed; + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __packed; + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ + +/** + * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM + * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd + * + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. 
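+ *
+ * Worked example (using the defaults above): cw_min = 0x0f starts the
+ * window at 15 slots; the first retry grows it to (2 * 15) + 1 = 31, the
+ * next to 63, and with cw_max = 0x3f the AND mask pins every later value
+ * at 63, so the window never exceeds 63 slots.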
+ */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __packed; + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10) + +/* Number of Access Categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + * + * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs + * 0: Background, 1: Best Effort, 2: Video, 3: Voice. + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __packed; + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ + +/* Special, dedicated locations within device's station table */ +#define IWL_AP_ID 0 +#define IWL_STA_ID 2 +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 + +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 + +#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) +#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) +#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19) +#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23) + +/* Use in mode field. 1: modify existing entry, 0: add new station entry */ +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007) +#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000) +#define STA_KEY_FLG_WEP cpu_to_le16(0x0001) +#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002) +#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800) +/* wep key is either from global key (0) or from station info array (1) */ +#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008) + +/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ +#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) +#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) +#define STA_KEY_MAX_NUM 8 + +/* Flags indicate whether to modify vs. 
don't change various station params */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 + +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +struct iwl4965_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + u8 key_offset; + u8 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ +} __packed; + +/** + * struct sta_id_modify + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * + * Driver selects unused table index when adding new station, + * or the index to a pre-existing station entry when modifying that station. + * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). + * + * modify_mask flags select which parameters to modify vs. leave alone. + */ +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __packed; + +/* + * REPLY_ADD_STA = 0x18 (command) + * + * The device contains an internal table of per-station information, + * with info on security keys, aggregation parameters, and Tx rates for + * initial Tx attempt and any retries (4965 devices uses + * REPLY_TX_LINK_QUALITY_CMD, + * 3945 uses REPLY_RATE_SCALE to set up rate tables). + * + * REPLY_ADD_STA sets up the table entry for one station, either creating + * a new entry, or modifying a pre-existing one. + * + * NOTE: RXON command (without "associated" bit set) wipes the station table + * clean. Moving into RF_KILL state does this also. Driver must set up + * new station table before transmitting anything on the RXON channel + * (except active scans or active measurements; those commands carry + * their own txpower/rate setup data). + * + * When getting started on a new channel, driver must set up the + * IWL_BROADCAST_ID entry (last entry in the table). For a client + * station in a BSS, once an AP is selected, driver sets up the AP STA + * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP + * are all that are needed for a BSS client station. If the device is + * used as AP, or in an IBSS network, driver must set up station table + * entries for all STAs in network, starting with index IWL_STA_ID. + */ + +struct iwl3945_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 rate_n_flags; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. 
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; +} __packed; + +struct iwl4965_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 reserved1; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. + */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __packed; + +/* Wrapper struct for 3945 and 4965 addsta_cmd structures */ +struct iwl_legacy_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 rate_n_flags; /* 3945 only */ + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. 
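+ *
+ * Editor's note (illustration, not from the original header): to update
+ * only this counter for a station that is already in the table, the
+ * driver would typically resend REPLY_ADD_STA with mode set to
+ * STA_CONTROL_MODIFY_MSK, sta.modify_mask set to
+ * STA_MODIFY_SLEEP_TX_COUNT_MSK, and sleep_tx_count set to the number
+ * of frames about to be released to the sleeping station.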
+ */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __packed; + + +#define ADD_STA_SUCCESS_MSK 0x1 +#define ADD_STA_NO_ROOM_IN_TABLE 0x2 +#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 +#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; /* ADD_STA_* */ +} __packed; + +#define REM_STA_SUCCESS_MSK 0x1 +/* + * REPLY_REM_STA = 0x19 (response) + */ +struct iwl_rem_sta_resp { + u8 status; +} __packed; + +/* + * REPLY_REM_STA = 0x19 (command) + */ +struct iwl_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ + u8 reserved[3]; + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 reserved2[2]; +} __packed; + +#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) +#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) +#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) +#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) +#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) + +#define IWL_DROP_SINGLE 0 +#define IWL_DROP_SELECTED 1 +#define IWL_DROP_ALL 2 + +/* + * REPLY_WEP_KEY = 0x20 + */ +struct iwl_wep_key { + u8 key_index; + u8 key_offset; + u8 reserved1[2]; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_wep_cmd { + u8 num_keys; + u8 global_key_type; + u8 flags; + u8 reserved; + struct iwl_wep_key key[0]; +} __packed; + +#define WEP_KEY_WEP_TYPE 1 +#define WEP_KEYS_MAX 4 +#define WEP_INVALID_OFFSET 0xff +#define WEP_KEY_LEN_64 5 +#define WEP_KEY_LEN_128 13 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 +#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) +#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8) + +#define RX_RES_STATUS_STATION_FOUND (1<<6) +#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +#define RX_MPDU_RES_STATUS_ICV_OK (0x20) +#define RX_MPDU_RES_STATUS_MIC_OK (0x40) +#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) +#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) + + +struct iwl3945_rx_frame_stats { + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + u8 payload[0]; +} __packed; + +struct iwl3945_rx_frame_hdr { + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + u8 payload[0]; +} __packed; + +struct iwl3945_rx_frame_end { + __le32 status; + __le64 timestamp; + __le32 beacon_timestamp; +} __packed; + +/* + * REPLY_3945_RX = 0x1b (response only, not a command) + * + * NOTE: DO NOT dereference from casts to this structure + * It is provided only for calculating minimum data set size. 
+ * The actual offsets of the hdr and end are dynamic based on + * stats.phy_count + */ +struct iwl3945_rx_frame { + struct iwl3945_rx_frame_stats stats; + struct iwl3945_rx_frame_hdr hdr; + struct iwl3945_rx_frame_end end; +} __packed; + +#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) + +/* Fixed (non-configurable) rx data from phy */ + +#define IWL49_RX_RES_PHY_CNT 14 +#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL49_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { + __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ + __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ + u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ + u8 pad[0]; +} __packed; + + +/* + * REPLY_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... */ + __le16 channel; /* channel number */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + __le32 rate_n_flags; /* RATE_MCS_* */ + __le16 byte_count; /* frame's byte-count */ + __le16 frame_time; /* frame's time on the air */ +} __packed; + +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __packed; + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + * Driver must place each REPLY_TX command into one of the prioritized Tx + * queues in host DRAM, shared between driver and device (see comments for + * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode + * are preparing to transmit, the device pulls the Tx command over the PCI + * bus via one of the device's Tx DMA channels, to fill an internal FIFO + * from which data will be transmitted. + * + * uCode handles all timing and protocol related to control frames + * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler + * handle reception of block-acks; uCode updates the host driver via + * REPLY_COMPRESSED_BA. + * + * uCode handles retrying Tx when an ACK is expected but not received. + * This includes trying lower data rates than the one requested in the Tx + * command, as set up by the REPLY_RATE_SCALE (for 3945) or + * REPLY_TX_LINK_QUALITY_CMD (4965). + * + * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. + * This command must be executed after every RXON command, before Tx can occur. + *****************************************************************************/ + +/* REPLY_TX Tx flags field */ + +/* + * 1: Use Request-To-Send protocol before this frame. + * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. + */ +#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) + +/* + * 1: Transmit Clear-To-Send to self before this frame. + * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. + * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. 
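+ *
+ * Editor's illustration (not part of the original patch): for a unicast
+ * data frame that needs RTS protection, the Tx command's tx_flags would
+ * be built roughly as
+ *
+ *	tx_flags = TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_RTS_MSK |
+ *		   TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ *
+ * per the notes below: an ACK is expected for unicast frames, and full
+ * Tx-Op protection must accompany either the RTS or the CTS flag.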
+ */ +#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) + +/* 1: Expect ACK from receiving station + * 0: Don't expect ACK (MAC header's duration field s/b 0) + * Set this for unicast frames, but not broadcast/multicast. */ +#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) + +/* For 4965 devices: + * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_index indicates first rate to try; + * uCode walks through table for additional Tx attempts. + * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. + * This rate will be used for all Tx attempts; it will not be scaled. */ +#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4) + +/* 1: Expect immediate block-ack. + * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ +#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) + +/* + * 1: Frame requires full Tx-Op protection. + * Set this if either RTS or CTS Tx Flag gets set. + */ +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) + +/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices. + * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ +#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9) + +/* 1: uCode overrides sequence control field in MAC header. + * 0: Driver provides sequence control field in MAC header. + * Set this for management frames, non-QOS data frames, non-unicast frames, + * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ +#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) + +/* 1: This frame is non-last MPDU; more fragments are coming. + * 0: Last fragment, or not using fragmentation. */ +#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14) + +/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. + * 0: No TSF required in outgoing frame. + * Set this for transmitting beacons and probe responses. */ +#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16) + +/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword + * alignment of frame's payload data field. + * 0: No pad + * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 + * field (but not both). Driver must align frame data (i.e. data following + * MAC header) to DWORD boundary. */ +#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20) + +/* accelerate aggregation support + * 0 - no CCMP encryption; 1 - CCMP encryption */ +#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22) + +/* HCCA-AP - disable duration overwriting. */ +#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) + + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * security overhead sizes + */ +#define WEP_IV_LEN 4 +#define WEP_ICV_LEN 4 +#define CCMP_MIC_LEN 8 +#define TKIP_ICV_LEN 4 + +/* + * REPLY_TX = 0x1c (command) + */ + +struct iwl3945_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. 
+ * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + u8 rate; + + /* Index of recipient station in uCode's station table */ + u8 sta_id; + u8 tid_tspec; + u8 sec_ctl; + u8 key[16]; + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + u8 supp_rates[2]; + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; + +/* + * REPLY_TX = 0x1c (response) + */ +struct iwl3945_tx_resp { + u8 failure_rts; + u8 failure_frame; + u8 bt_kill_count; + u8 rate; + __le32 wireless_media_time; + __le32 status; /* TX status */ +} __packed; + + +/* + * 4965 uCode updates these Tx attempt count values in host DRAM. + * Used for managing Tx retries when expecting block-acks. + * Driver should set these fields to 0. + */ +struct iwl_dram_scratch { + u8 try_cnt; /* Tx attempts */ + u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ + __le16 reserved; +} __packed; + +struct iwl_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ + struct iwl_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. 
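+ *
+ * Editor's sketch (assumed driver usage, not from the original header):
+ * given the bus address txcmd_phys of this command in host DRAM, the two
+ * pointer fields below would be filled in roughly as
+ *
+ *	scratch_phys = txcmd_phys + offsetof(struct iwl_tx_cmd, scratch);
+ *	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ *	tx_cmd->dram_msb_ptr = upper_32_bits(scratch_phys);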
*/ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; + +/* TX command response is sent after *3945* transmission attempts. + * + * NOTES: + * + * TX_STATUS_FAIL_NEXT_FRAG + * + * If the fragment flag in the MAC header for the frame being transmitted + * is set and there is insufficient time to transmit the next frame, the + * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'. + * + * TX_STATUS_FIFO_UNDERRUN + * + * Indicates the host did not provide bytes to the FIFO fast enough while + * a TX was in progress. + * + * TX_STATUS_FAIL_MGMNT_ABORT + * + * This status is only possible if the ABORT ON MGMT RX parameter was + * set to true with the TX command. + * + * If the MSB of the status parameter is set then an abort sequence is + * required. This sequence consists of the host activating the TX Abort + * control line, and then waiting for the TX Abort command response. This + * indicates that a the device is no longer in a transmit state, and that the + * command FIFO has been cleared. The host must then deactivate the TX Abort + * control line. Receiving is still allowed in this case. + */ +enum { + TX_3945_STATUS_SUCCESS = 0x01, + TX_3945_STATUS_DIRECT_DONE = 0x02, + TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85, + TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86, + TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_3945_STATUS_FAIL_DEST_PS = 0x88, + TX_3945_STATUS_FAIL_ABORTED = 0x89, + TX_3945_STATUS_FAIL_BT_RETRY = 0x8a, + TX_3945_STATUS_FAIL_STA_INVALID = 0x8b, + TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e, + TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_3945_STATUS_FAIL_TX_LOCKED = 0x90, + TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +/* + * TX command response is sent after *4965* transmission attempts. + * + * both postpone and abort status are expected behavior from uCode. 
there is + * no special operation required from driver; except for RFKILL_FLUSH, + * which required tx flush host command to flush all the tx frames in queues + */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +/* ******************************* + * TX aggregation status + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */ +#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */ + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK) + +/* # tx attempts for first frame in aggregation */ +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +/* Command ID and sequence number of Tx command for this frame */ +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for + * a single frame. Multiple attempts, at various bit rates, may have + * been made for this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for + * 2 or more frames that used block-acknowledge. All frames were + * transmitted at same rate. Rate scaling may have been used if first + * frame in this new agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered here; + * block-ack has not been received by the time the 4965 device records + * this status. 
+ * This status relates to reasons the tx might have been blocked or aborted + * within the sending station (this 4965 device), rather than whether it was + * received successfully by the destination station. + */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +struct iwl4965_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ + u8 failure_rts; /* # failures due to unsuccessful RTS */ + u8 failure_frame; /* # failures due to no ACK (unused for agg) */ + + /* For non-agg: Rate at which frame was successful. + * For agg: Rate at which all frames were transmitted. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* For non-agg: RTS + CTS + frame tx attempts time + ACK. + * For agg: RTS + CTS + aggregation tx time + block-ack time. */ + __le16 wireless_media_time; /* uSecs */ + + __le16 reserved; + __le32 pa_power1; /* RF power amplifier measurement (not used) */ + __le32 pa_power2; + + /* + * For non-agg: frame status TX_STATUS_* + * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status + * fields follow this one, up to frame_count. + * Bit fields: + * 11- 0: AGG_TX_STATE_* status code + * 15-12: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a + * member of a previous aggregation block). If rate + * scaling is used, retry count indicates the rate + * table entry used for all frames in the new agg. + * 31-16: Sequence # for this frame's Tx cmd (not SSN!) + */ + union { + __le32 status; + struct agg_tx_status agg_status[0]; /* for each agg frame */ + } u; +} __packed; + +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + * + * Reports Block-Acknowledge from recipient station + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + /* Index of recipient (BA-sending) station in uCode's station table */ + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; +} __packed; + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + * + * See details under "TXPOWER" in iwl-4965-hw.h. + */ + +struct iwl3945_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __packed; + +struct iwl4965_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl4965_tx_power_db tx_power; +} __packed; + + +/** + * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response + * + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * + * NOTE: The table of rates passed to the uCode via the + * RATE_SCALE command sets up the corresponding order of + * rates used for all related commands, including rate + * masks, etc. 
+ * + * For example, if you set 9MB (PLCP 0x0f) as the first + * rate in the rate table, the bit mask for that rate + * when passed through ofdm_basic_rates on the REPLY_RXON + * command would be bit 0 (1 << 0) + */ +struct iwl3945_rate_scaling_info { + __le16 rate_n_flags; + u8 try_cnt; + u8 next_rate_index; +} __packed; + +struct iwl3945_rate_scaling_cmd { + u8 table_id; + u8 reserved[3]; + struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; +} __packed; + + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) + +/* # of EDCA prioritized tx fifos */ +#define LINK_QUAL_AC_NUM AC_NUM + +/* # entries in rate scale table to support Tx retries */ +#define LINK_QUAL_MAX_RETRY_NUM 16 + +/* Tx antenna selection values */ +#define LINK_QUAL_ANT_A_MSK (1 << 0) +#define LINK_QUAL_ANT_B_MSK (1 << 1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + + +/** + * struct iwl_link_qual_general_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_general_params { + u8 flags; + + /* No entries at or above this (driver chosen) index contain MIMO */ + u8 mimo_delimiter; + + /* Best single antenna to use for single stream (legacy, SISO). */ + u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* + * If driver needs to use different initial rates for different + * EDCA QOS access categories (as implemented by tx fifos 0-3), + * this table will set that up, by indicating the indexes in the + * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. + * Otherwise, driver should set all entries to 0. + * + * Entry usage: + * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice + * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. + */ + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __packed; + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) +#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) + +#define LINK_QUAL_AGG_DISABLE_START_DEF (3) +#define LINK_QUAL_AGG_DISABLE_START_MAX (255) +#define LINK_QUAL_AGG_DISABLE_START_MIN (0) + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +/** + * struct iwl_link_qual_agg_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_agg_params { + + /* + *Maximum number of uSec in aggregation. + * default set to 4000 (4 milliseconds) if not configured in .cfg + */ + __le16 agg_time_limit; + + /* + * Number of Tx retries allowed for a frame, before that frame will + * no longer be considered for the start of an aggregation sequence + * (scheduler will then try to tx it as single frame). + * Driver should set this to 3. + */ + u8 agg_dis_start_th; + + /* + * Maximum number of frames in aggregation. + * 0 = no limit (default). 1 = no aggregation. + * Other values = max # frames in aggregation. + */ + u8 agg_frame_cnt_limit; + + __le32 reserved; +} __packed; + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * + * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. + * + * Each station in the 4965 device's internal station table has its own table + * of 16 + * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when + * an ACK is not received. This command replaces the entire table for + * one station. 
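+ *
+ * Editor's illustration (not part of the original patch): the aggregation
+ * block of this command is normally seeded with the defaults defined
+ * above, e.g.
+ *
+ *	lq.agg_params.agg_time_limit      = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+ *	lq.agg_params.agg_dis_start_th    = LINK_QUAL_AGG_DISABLE_START_DEF;
+ *	lq.agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ *
+ * where lq is the struct iwl_link_quality_cmd defined after this comment.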
+ * + * NOTE: Station must already be in 4965 device's station table. + * Use REPLY_ADD_STA. + * + * The rate scaling procedures described below work well. Of course, other + * procedures are possible, and may work better for particular environments. + * + * + * FILLING THE RATE TABLE + * + * Given a particular initial rate and mode, as determined by the rate + * scaling algorithm described below, the Linux driver uses the following + * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the + * Link Quality command: + * + * + * 1) If using High-throughput (HT) (SISO or MIMO) initial rate: + * a) Use this same initial rate for first 3 entries. + * b) Find next lower available rate using same mode (SISO or MIMO), + * use for next 3 entries. If no lower rate available, switch to + * legacy mode (no HT40 channel, no MIMO, no short guard interval). + * c) If using MIMO, set command's mimo_delimiter to number of entries + * using MIMO (3 or 6). + * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel, + * no MIMO, no short guard interval), at the next lower bit rate + * (e.g. if second HT bit rate was 54, try 48 legacy), and follow + * legacy procedure for remaining table entries. + * + * 2) If using legacy initial rate: + * a) Use the initial rate for only one entry. + * b) For each following entry, reduce the rate to next lower available + * rate, until reaching the lowest available rate. + * c) When reducing rate, also switch antenna selection. + * d) Once lowest available rate is reached, repeat this rate until + * rate table is filled (16 entries), switching antenna each entry. + * + * + * ACCUMULATING HISTORY + * + * The rate scaling algorithm for 4965 devices, as implemented in Linux driver, + * uses two sets of frame Tx success history: One for the current/active + * modulation mode, and one for a speculative/search mode that is being + * attempted. If the speculative mode turns out to be more effective (i.e. + * actual transfer rate is better), then the driver continues to use the + * speculative mode as the new current active mode. + * + * Each history set contains, separately for each possible rate, data for a + * sliding window of the 62 most recent tx attempts at that rate. The data + * includes a shifting bitmap of success(1)/failure(0), and sums of successful + * and attempted frames, from which the driver can additionally calculate a + * success ratio (success / attempted) and number of failures + * (attempted - success), and control the size of the window (attempted). + * The driver uses the bit map to remove successes from the success sum, as + * the oldest tx attempts fall out of the window. + * + * When the 4965 device makes multiple tx attempts for a given frame, each + * attempt might be at a different rate, and have different modulation + * characteristics (e.g. antenna, fat channel, short guard interval), as set + * up in the rate scaling table in the Link Quality command. The driver must + * determine which rate table entry was used for each tx attempt, to determine + * which rate-specific history to update, and record only those attempts that + * match the modulation characteristics of the history set. + * + * When using block-ack (aggregation), all frames are transmitted at the same + * rate, since there is no per-attempt acknowledgment from the destination + * station. The Tx response struct iwl_tx_resp indicates the Tx rate in + * rate_n_flags field. 
After receiving a block-ack, the driver can update + * history for the entire block all at once. + * + * + * FINDING BEST STARTING RATE: + * + * When working with a selected initial modulation mode (see below), the + * driver attempts to find a best initial rate. The initial rate is the + * first entry in the Link Quality command's rate table. + * + * 1) Calculate actual throughput (success ratio * expected throughput, see + * table below) for current initial rate. Do this only if enough frames + * have been attempted to make the value meaningful: at least 6 failed + * tx attempts, or at least 8 successes. If not enough, don't try rate + * scaling yet. + * + * 2) Find available rates adjacent to current initial rate. Available means: + * a) supported by hardware && + * b) supported by association && + * c) within any constraints selected by user + * + * 3) Gather measured throughputs for adjacent rates. These might not have + * enough history to calculate a throughput. That's okay, we might try + * using one of them anyway! + * + * 4) Try decreasing rate if, for current rate: + * a) success ratio is < 15% || + * b) lower adjacent rate has better measured throughput || + * c) higher adjacent rate has worse throughput, and lower is unmeasured + * + * As a sanity check, if decrease was determined above, leave rate + * unchanged if: + * a) lower rate unavailable + * b) success ratio at current rate > 85% (very good) + * c) current measured throughput is better than expected throughput + * of lower rate (under perfect 100% tx conditions, see table below) + * + * 5) Try increasing rate if, for current rate: + * a) success ratio is < 15% || + * b) both adjacent rates' throughputs are unmeasured (try it!) || + * b) higher adjacent rate has better measured throughput || + * c) lower adjacent rate has worse throughput, and higher is unmeasured + * + * As a sanity check, if increase was determined above, leave rate + * unchanged if: + * a) success ratio at current rate < 70%. This is not particularly + * good performance; higher rate is sure to have poorer success. + * + * 6) Re-evaluate the rate after each tx frame. If working with block- + * acknowledge, history and statistics may be calculated for the entire + * block (including prior history that fits within the history windows), + * before re-evaluation. + * + * FINDING BEST STARTING MODULATION MODE: + * + * After working with a modulation mode for a "while" (and doing rate scaling), + * the driver searches for a new initial mode in an attempt to improve + * throughput. The "while" is measured by numbers of attempted frames: + * + * For legacy mode, search for new mode after: + * 480 successful frames, or 160 failed frames + * For high-throughput modes (SISO or MIMO), search for new mode after: + * 4500 successful frames, or 400 failed frames + * + * Mode switch possibilities are (3 for each mode): + * + * For legacy: + * Change antenna, try SISO (if HT association), try MIMO (if HT association) + * For SISO: + * Change antenna, try MIMO, try shortened guard interval (SGI) + * For MIMO: + * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI) + * + * When trying a new mode, use the same bit rate as the old/current mode when + * trying antenna switches and shortened guard interval. 
When switching to + * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate + * for which the expected throughput (under perfect conditions) is about the + * same or slightly better than the actual measured throughput delivered by + * the old/current mode. + * + * Actual throughput can be estimated by multiplying the expected throughput + * by the success ratio (successful / attempted tx frames). Frame size is + * not considered in this calculation; it assumes that frame size will average + * out to be fairly consistent over several samples. The following are + * metric values for expected throughput assuming 100% success ratio. + * Only G band has support for CCK rates: + * + * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60 + * + * G: 7 13 35 58 40 57 72 98 121 154 177 186 186 + * A: 0 0 0 0 40 57 72 98 121 154 177 186 186 + * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202 + * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211 + * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251 + * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257 + * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257 + * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264 + * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289 + * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293 + * + * After the new mode has been tried for a short while (minimum of 6 failed + * frames or 8 successful frames), compare success ratio and actual throughput + * estimate of the new mode with the old. If either is better with the new + * mode, continue to use the new mode. + * + * Continue comparing modes until all 3 possibilities have been tried. + * If moving from legacy to HT, try all 3 possibilities from the new HT + * mode. After trying all 3, a best mode is found. Continue to use this mode + * for the longer "while" described above (e.g. 480 successful frames for + * legacy), and then repeat the search process. + * + */ +struct iwl_link_quality_cmd { + + /* Index of destination/recipient station in uCode's station table */ + u8 sta_id; + u8 reserved1; + __le16 control; /* not used */ + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + + /* + * Rate info; when using rate-scaling, Tx command's initial_rate_index + * specifies 1st Tx rate attempted, via index into this table. + * 4965 devices works its way through table when retrying Tx. + */ + struct { + __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __packed; + +/* + * BT configuration enable flags: + * bit 0 - 1: BT channel announcement enabled + * 0: disable + * bit 1 - 1: priority of BT device enabled + * 0: disable + */ +#define BT_COEX_DISABLE (0x0) +#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0) +#define BT_ENABLE_PRIORITY BIT(1) + +#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY) + +#define BT_LEAD_TIME_DEF (0x1E) + +#define BT_MAX_KILL_DEF (0x5) + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + * + * 3945 and 4965 devices support hardware handshake with Bluetooth device on + * same platform. Bluetooth device alerts wireless device when it will Tx; + * wireless device can delay or kill its own Tx to accommodate. 
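+ *
+ * Editor's illustration (not part of the original patch): enabling
+ * coexistence with the defaults defined above would look roughly like
+ *
+ *	struct iwl_bt_cmd bt_cmd = {
+ *		.flags     = BT_COEX_ENABLE,
+ *		.lead_time = BT_LEAD_TIME_DEF,
+ *		.max_kill  = BT_MAX_KILL_DEF,
+ *	};
+ *
+ * with kill_ack_mask and kill_cts_mask left at zero.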
+ */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __packed; + + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __packed; + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ + __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ +} __packed; + +/* clear channel availability counters */ +struct iwl_measurement_cca_counters { + __le32 ofdm; + __le32 cck; +} __packed; + +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), + /* bits 5:6 are reserved */ + IWL_MEASURE_IDLE = (1 << 7), +}; + +/* + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + */ +struct iwl_spectrum_notification { + u8 id; /* measurement id -- 0 or 1 */ + u8 token; + u8 channel_index; /* index in measurement channel list */ + u8 state; /* 0 - start, 1 - stop */ + __le32 start_time; /* lower 32-bits of TSF */ + u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ + u8 channel; + u8 type; /* see enum iwl_measurement_type */ + u8 reserved1; + /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only + * valid if applicable for measurement type requested. 
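+ *
+ * Editor's note (inference, not stated in the original header): the
+ * histogram below, for instance, would only be meaningful when the
+ * requested measurement type included IWL_MEASURE_HISTOGRAM_RPI or
+ * IWL_MEASURE_HISTOGRAM_NOISE (see enum iwl_measure_type above).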
*/ + __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ + __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ + __le32 cca_time; /* channel load time in usecs */ + u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - + * unidentified */ + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; /* lower 32-bits of TSF */ + __le32 status; /* see iwl_measurement_status */ +} __packed; + +/****************************************************************************** + * (7) + * Power Management Commands, Responses, Notifications: + * + *****************************************************************************/ + +/** + * struct iwl_powertable_cmd - Power Table Command + * @flags: See below: + * + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * PM allow: + * bit 0 - '0' Driver not allow power management + * '1' Driver allow PM (use rest of parameters) + * + * uCode send sleep notifications: + * bit 1 - '0' Don't send sleep notification + * '1' send sleep notification (SEND_PM_NOTIFICATION) + * + * Sleep over DTIM + * bit 2 - '0' PM have to walk up every DTIM + * '1' PM could sleep over DTIM till listen Interval. + * + * PCI power managed + * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1) + * '1' !(PCI_CFG_LINK_CTRL & 0x1) + * + * Fast PD + * bit 4 - '1' Put radio to sleep when receiving frame for others + * + * Force sleep Modes + * bit 31/30- '00' use both mac/xtal sleeps + * '01' force Mac sleep + * '10' force xtal sleep + * '11' Illegal set + * + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * ucode assume sleep over DTIM is allowed and we don't need to wake up + * for every DTIM. + */ +#define IWL_POWER_VEC_SIZE 5 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1)) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) +#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) +#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5)) +#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6)) +#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7)) + +struct iwl3945_powertable_cmd { + __le16 flags; + u8 reserved[2]; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; +} __packed; + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __packed; + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * all devices identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __packed; + +/* Sleep states. all devices identical. 
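+ *
+ * Editor's note (inference): these are presumably the values reported
+ * back in the pm_sleep_mode and pm_wakeup_src fields of
+ * struct iwl_sleep_notification above.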
*/ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define CT_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __packed; + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) +#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) + +/** + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * + * One for each channel in the scan list. + * Each channel can independently select: + * 1) SSID for directed active scans + * 2) Txpower setting (for rate specified within Tx command) + * 3) How long to stay on-channel (behavior may be modified by quiet_time, + * quiet_plcp_th, good_CRC_th) + * + * To avoid uCode errors, make sure the following are true (see comments + * under struct iwl_scan_cmd about max_out_time and quiet_time): + * 1) If using passive_dwell (i.e. passive_dwell != 0): + * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) + * 2) quiet_time <= active_dwell + * 3) If restricting off-channel time (i.e. max_out_time !=0): + * passive_dwell < max_out_time + * active_dwell < max_out_time + */ +struct iwl3945_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:4 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 5:7 reserved + */ + u8 type; + u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ + struct iwl3945_tx_power tpc; + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __packed; + +/* set number of direct probes u8 type */ +#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) + +struct iwl_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:20 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 21:31 reserved + */ + __le32 type; + __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __packed; + +/* set number of direct probes __le32 type */ +#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 (4) entries. + * SSID IEs get transmitted in reverse order of entry. 
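+ *
+ * Editor's illustration (not part of the original patch): a directed
+ * active scan for a single SSID could fill entry 0 of the scan command's
+ * direct_scan[] array and then select it per channel via the probe mask,
+ * roughly
+ *
+ *	scan->direct_scan[0].id  = WLAN_EID_SSID;
+ *	scan->direct_scan[0].len = ssid_len;
+ *	memcpy(scan->direct_scan[0].ssid, ssid, ssid_len);
+ *	chan->type |= SCAN_CHANNEL_TYPE_ACTIVE | IWL_SCAN_PROBE_MASK(1);
+ *
+ * where scan is a struct iwl_scan_cmd and chan a struct iwl_scan_channel,
+ * both defined further below.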
+ */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __packed; + +#define PROBE_OPTION_MAX_3945 4 +#define PROBE_OPTION_MAX 20 +#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH_DISABLED 0 +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) +#define IWL_MAX_SCAN_SIZE 1024 +#define IWL_MAX_CMD_SIZE 4096 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + * + * The hardware scan command is very powerful; the driver can set it up to + * maintain (relatively) normal network traffic while doing a scan in the + * background. The max_out_time and suspend_time control the ratio of how + * long the device stays on an associated network channel ("service channel") + * vs. how long it's away from the service channel, i.e. tuned to other channels + * for scanning. + * + * max_out_time is the max time off-channel (in usec), and suspend_time + * is how long (in "extended beacon" format) that the scan is "suspended" + * after returning to the service channel. That is, suspend_time is the + * time that we stay on the service channel, doing normal work, between + * scan segments. The driver may set these parameters differently to support + * scanning when associated vs. not associated, and light vs. heavy traffic + * loads when associated. + * + * After receiving this command, the device's scan engine does the following; + * + * 1) Sends SCAN_START notification to driver + * 2) Checks to see if it has time to do scan for one channel + * 3) Sends NULL packet, with power-save (PS) bit set to 1, + * to tell AP that we're going off-channel + * 4) Tunes to first channel in scan list, does active or passive scan + * 5) Sends SCAN_RESULT notification to driver + * 6) Checks to see if it has time to do scan on *next* channel in list + * 7) Repeats 4-6 until it no longer has time to scan the next channel + * before max_out_time expires + * 8) Returns to service channel + * 9) Sends NULL packet with PS=0 to tell AP that we're back + * 10) Stays on service channel until suspend_time expires + * 11) Repeats entire process 2-10 until list is complete + * 12) Sends SCAN_COMPLETE notification + * + * For fast, efficient scans, the scan command also has support for staying on + * a channel for just a short time, if doing active scanning and getting no + * responses to the transmitted probe request. This time is controlled by + * quiet_time, and the number of received packets below which a channel is + * considered "quiet" is controlled by quiet_plcp_threshold. + * + * For active scanning on channels that have regulatory restrictions against + * blindly transmitting, the scan can listen before transmitting, to make sure + * that there is already legitimate activity on the channel. If enough + * packets are cleanly received on the channel (controlled by good_CRC_th, + * typical value 1), the scan engine starts transmitting probe requests. + * + * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. + * + * To avoid uCode errors, see timing restrictions described under + * struct iwl_scan_channel. + */ + +struct iwl3945_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 
1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 reserved1; + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service channel: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl3945_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl3945_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; + +struct iwl_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 rx_chain; /* RXON_RX_CHAIN_* */ + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service chnl: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; + +/* Can abort will notify by complete notification with abort status. 
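+ *
+ * Editor's sketch (assumption, not from the original header): the total
+ * number of bytes handed to the device for one scan is roughly
+ *
+ *	sizeof(struct iwl_scan_cmd)
+ *		+ probe_req_len
+ *		+ channel_count * sizeof(struct iwl_scan_channel)
+ *
+ * i.e. the fixed command, the probe request frame placed in data[], and
+ * the per-channel entries appended after it, as described in the struct
+ * comments above; the variable part is bounded by IWL_MAX_SCAN_SIZE.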
*/ +#define CAN_ABORT_STATUS cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __packed; + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __packed; + +#define SCAN_OWNER_STATUS 0x1; +#define MEASURE_OWNER_STATUS 0x2; + +#define IWL_PROBE_STATUS_OK 0 +#define IWL_PROBE_STATUS_TX_FAILED BIT(0) +/* error statuses combined with TX_FAILED */ +#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) +#define IWL_PROBE_STATUS_FAIL_BT BIT(2) + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; /* not enough time to send */ + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __packed; + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __packed; + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +enum iwl_ibss_manager { + IWL_NOT_IBSS_MANAGER = 0, + IWL_IBSS_MANAGER = 1, +}; + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ + +struct iwl3945_beacon_notif { + struct iwl3945_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + +struct iwl4965_beacon_notif { + struct iwl4965_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ + +struct iwl3945_tx_beacon_cmd { + struct iwl3945_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __packed; + +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __packed; + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __packed; + +/* statistics command response */ + +struct iwl39_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 
fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; +} __packed; + +struct iwl39_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ +} __packed; + +struct iwl39_statistics_rx { + struct iwl39_statistics_rx_phy ofdm; + struct iwl39_statistics_rx_phy cck; + struct iwl39_statistics_rx_non_phy general; +} __packed; + +struct iwl39_statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; +} __packed; + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; + +struct iwl39_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; +} __packed; + +struct iwl39_statistics_general { + __le32 temperature; + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct iwl39_statistics_div div; +} __packed; + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +} __packed; + +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; + +#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +} __packed; + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; +} __packed; + +/** + * struct statistics_tx_power - current tx power + * + * @ant_a: current tx power on chain a in 1/2 dB step + * @ant_b: current tx power on chain b in 1/2 dB step + * @ant_c: current tx power on chain c in 1/2 dB step + */ +struct statistics_tx_power { + u8 ant_a; + u8 ant_b; + u8 ant_c; + u8 reserved; +} __packed; + +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; +} __packed; + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; + + __le32 reserved1; +} __packed; + + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 reserved1; + __le32 reserved2; +} __packed; + +struct statistics_general_common { + __le32 temperature; /* radio temperature */ + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; +} __packed; + +struct statistics_general { + struct statistics_general_common common; + __le32 reserved2; + __le32 reserved3; +} __packed; + +#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * all devices identical. + * + * This command triggers an immediate response containing uCode statistics. + * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 
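As a minimal illustration of the two configuration flags defined immediately below, this sketch combines them into the value that goes into struct iwl_statistics_cmd; the driver-side helper iwl_legacy_send_statistics_request() later in this patch builds the command the same way. The build_stats_flags() name is hypothetical.

/* Illustrative sketch only -- mirrors the flag usage of
 * iwl_legacy_send_statistics_request() in iwl-core.c below. */
#include <stdint.h>

#define STATS_CONF_CLEAR_STATS   0x1 /* clear counters after the response */
#define STATS_CONF_DISABLE_NOTIF 0x2 /* stop per-beacon notifications */

static uint32_t build_stats_flags(int clear, int disable_notif)
{
	uint32_t flags = 0;

	if (clear)
		flags |= STATS_CONF_CLEAR_STATS;
	if (disable_notif)
		flags |= STATS_CONF_DISABLE_NOTIF;
	/* In the driver this value is stored with cpu_to_le32() into
	 * struct iwl_statistics_cmd.configuration_flags. */
	return flags;
}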
+ */ +#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ +} __packed; + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ +#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) +#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) + +struct iwl3945_notif_statistics { + __le32 flag; + struct iwl39_statistics_rx rx; + struct iwl39_statistics_tx tx; + struct iwl39_statistics_general general; +} __packed; + +struct iwl_notif_statistics { + __le32 flag; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; +} __packed; + +/* + * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) + * + * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects + * missed beacons, regardless of how many were missed. The notification + * carries all of the beacon bookkeeping: the total number of missed beacons, + * the number of consecutive missed beacons, the number of beacons received, + * and the number of beacons expected. + * + * If uCode detects consecutive_missed_beacons > 5, it resets the radio to + * bring the radio/PHY back to a working state; this is unrelated to when the + * driver performs sensitivity calibration. + * + * The driver should set its own missed_beacon_threshold to decide when to + * perform sensitivity calibration, based on the number of consecutive missed + * beacons, in order to improve overall performance, especially in noisy + * environments. + * + */ + +#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) +#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) +#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF + +struct iwl_missed_beacon_notif { + __le32 consecutive_missed_beacons; + __le32 total_missed_becons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; + + +/****************************************************************************** + * (11) + * Rx Calibration Commands: + * + * With the uCode used for open source drivers, most Tx calibration (except + * for Tx Power) and most Rx calibration is done by uCode during the + * "initialize" phase of uCode boot. 
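Before the calibration material that follows, here is a small standalone sketch (illustrative only, with simplified stand-in names, not the driver's handler) of the driver-side policy suggested by the missed-beacon notification above: compare the consecutive-missed count against a driver-chosen threshold and recalibrate sensitivity when it is exceeded.

/* Illustrative sketch only -- missed-beacon threshold check. */
#include <stdint.h>
#include <stdbool.h>

struct missed_beacon_notif {
	uint32_t consecutive_missed_beacons;
	uint32_t total_missed_beacons;
	uint32_t num_expected_beacons;
	uint32_t num_recvd_beacons;
};

/* Returns true when the driver should re-run sensitivity calibration.
 * threshold would typically start at IWL_MISSED_BEACON_THRESHOLD_DEF (5). */
static bool should_recalibrate(const struct missed_beacon_notif *n,
			       uint32_t threshold)
{
	return n->consecutive_missed_beacons > threshold;
}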
Driver must calibrate only: + * + * 1) Tx power (depends on temperature), described elsewhere + * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas) + * 3) Receiver sensitivity (to optimize signal detection) + * + *****************************************************************************/ + +/** + * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) + * + * This command sets up the Rx signal detector for a sensitivity level that + * is high enough to lock onto all signals within the associated network, + * but low enough to ignore signals that are below a certain threshold, so as + * not to have too many "false alarms". False alarms are signals that the + * Rx DSP tries to lock onto, but then discards after determining that they + * are noise. + * + * The optimum number of false alarms is between 5 and 50 per 200 TUs + * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e. + * time listening, not transmitting). Driver must adjust sensitivity so that + * the ratio of actual false alarms to actual Rx time falls within this range. + * + * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each + * received beacon. These provide information to the driver to analyze the + * sensitivity. Don't analyze statistics that come in from scanning, or any + * other non-associated-network source. Pertinent statistics include: + * + * From "general" statistics (struct statistics_rx_non_phy): + * + * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) + * Measure of energy of desired signal. Used for establishing a level + * below which the device does not detect signals. + * + * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB) + * Measure of background noise in silent period after beacon. + * + * channel_load + * uSecs of actual Rx time during beacon period (varies according to + * how much time was spent transmitting). + * + * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: + * + * false_alarm_cnt + * Signal locks abandoned early (before phy-level header). + * + * plcp_err + * Signal locks abandoned late (during phy-level header). + * + * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from + * beacon to beacon, i.e. each value is an accumulation of all errors + * before and including the latest beacon. Values will wrap around to 0 + * after counting up to 2^32 - 1. Driver must differentiate vs. + * previous beacon's values to determine # false alarms in the current + * beacon period. + * + * Total number of false alarms = false_alarms + plcp_errs + * + * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd + * (notice that the start points for OFDM are at or close to settings for + * maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 + * + * If actual rate of OFDM false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), reduce sensitivity + * by *adding* 1 to all 4 of the table entries above, up to the max for + * each entry. Conversely, if false alarm rate is too low (less than 5 + * for each 204.8 msecs listening), *subtract* 1 from each entry to + * increase sensitivity. + * + * For CCK sensitivity, keep track of the following: + * + * 1). 
20-beacon history of maximum background noise, indicated by + * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the + * 3 receivers. For any given beacon, the "silence reference" is + * the maximum of last 60 samples (20 beacons * 3 receivers). + * + * 2). 10-beacon history of strongest signal level, as indicated + * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers, + * i.e. the strength of the signal through the best receiver at the + * moment. These measurements are "upside down", with lower values + * for stronger signals, so max energy will be *minimum* value. + * + * Then for any given beacon, the driver must determine the *weakest* + * of the strongest signals; this is the minimum level that needs to be + * successfully detected, when using the best receiver at the moment. + * "Max cck energy" is the maximum (higher value means lower energy!) + * of the last 10 minima. Once this is determined, driver must add + * a little margin by adding "6" to it. + * + * 3). Number of consecutive beacon periods with too few false alarms. + * Reset this to 0 at the first beacon period that falls within the + * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). + * + * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd + * (notice that the start points for CCK are at maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 + * + * If actual rate of CCK false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), method for reducing + * sensitivity is: + * + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * up to max 400. + * + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, + * sensitivity has been reduced a significant amount; bring it up to + * a moderate 161. Otherwise, *add* 3, up to max 200. + * + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, + * sensitivity has been reduced only a moderate or small amount; + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, + * down to min 0. Otherwise (if gain has been significantly reduced), + * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. + * + * b) Save a snapshot of the "silence reference". + * + * If actual rate of CCK false alarms (+ plcp_errors) is too low + * (less than 5 for each 204.8 msecs listening), method for increasing + * sensitivity is used only if: + * + * 1a) Previous beacon did not have too many false alarms + * 1b) AND difference between previous "silence reference" and current + * "silence reference" (prev - current) is 2 or more, + * OR 2) 100 or more consecutive beacon periods have had rate of + * less than 5 false alarms per 204.8 milliseconds rx time. + * + * Method for increasing sensitivity: + * + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, + * down to min 125. + * + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * down to min 200. + * + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. + * + * If actual rate of CCK false alarms (+ plcp_errors) is within good range + * (between 5 and 50 for each 204.8 msecs listening): + * + * 1) Save a snapshot of the silence reference. 
+ * + * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), + * give some extra margin to energy threshold by *subtracting* 8 + * from value in HD_MIN_ENERGY_CCK_DET_INDEX. + * + * For all cases (too few, too many, good range), make sure that the CCK + * detection threshold (energy) is below the energy level for robust + * detection over the past 10 beacon periods, the "Max cck energy". + * Lower values mean higher energy; this means making sure that the value + * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". + * + */ + +/* + * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) + */ +#define HD_TABLE_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +/* Control field in struct iwl_sensitivity_cmd */ +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) + +/** + * struct iwl_sensitivity_cmd + * @control: (1) updates working table, (0) updates default table + * @table: energy threshold values, use HD_* as index into table + * + * Always use "1" in "control" to update uCode's working table and DSP. + */ +struct iwl_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ +} __packed; + + +/** + * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) + * + * This command sets the relative gains of 4965 device's 3 radio receiver chains. + * + * After the first association, driver should accumulate signal and noise + * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 + * beacons from the associated network (don't collect statistics that come + * in from scanning, or any other non-network source). + * + * DISCONNECTED ANTENNA: + * + * Driver should determine which antennas are actually connected, by comparing + * average beacon signal levels for the 3 Rx chains. Accumulate (add) the + * following values over 20 beacons, one accumulator for each of the chains + * a/b/c, from struct statistics_rx_non_phy: + * + * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the strongest signal from among a/b/c. Compare the other two to the + * strongest. If any signal is more than 15 dB (times 20, unless you + * divide the accumulated values by 20) below the strongest, the driver + * considers that antenna to be disconnected, and should not try to use that + * antenna/chain for Rx or Tx. If both A and B seem to be disconnected, + * driver should declare the stronger one as connected, and attempt to use it + * (A and B are the only 2 Tx chains!). + * + * + * RX BALANCE: + * + * Driver should balance the 3 receivers (but just the ones that are connected + * to antennas, see above) for gain, by comparing the average signal levels + * detected during the silence after each beacon (background noise). 
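Returning to the SENSITIVITY_CMD notes above (the Rx-balance description resumes below): because false_alarm_cnt and plcp_err accumulate monotonically, the driver differences them against the previous beacon's values, normalizes by listening time, and only then nudges the table entries. The standalone sketch below (illustrative only, simplified names, not the driver's sensitivity code) shows that bookkeeping together with the OFDM rule: add 1 to the four OFDM entries when false alarms exceed 50 per 204.8 ms of Rx time, subtract 1 when below 5, clamped to each entry's min/max.

/* Illustrative sketch only -- OFDM false-alarm bookkeeping per the
 * SENSITIVITY_CMD notes above. */
#include <stdint.h>

#define FA_MAX_PER_PERIOD 50     /* per 204.8 ms of listening */
#define FA_MIN_PER_PERIOD  5
#define PERIOD_USEC       204800

struct ofdm_sens {
	uint16_t corr32_x1;      /* start 90,  range 85..120  */
	uint16_t corr32_x1_mrc;  /* start 170, range 170..210 */
	uint16_t corr32_x4;      /* start 105, range 105..140 */
	uint16_t corr32_x4_mrc;  /* start 220, range 220..270 */
};

static uint16_t clamp16(uint16_t v, uint16_t lo, uint16_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* cur/prev are (false_alarm_cnt + plcp_err) from two consecutive beacons;
 * rx_usec is the listening time between them (from channel_load).
 * Unsigned subtraction handles 32-bit counter wraparound. */
static void adjust_ofdm(struct ofdm_sens *s,
			uint32_t cur, uint32_t prev, uint32_t rx_usec)
{
	uint32_t fa = cur - prev;
	int step = 0;

	if (!rx_usec)
		return;

	/* Normalize to the 204.8 ms reference window. */
	uint64_t fa_norm = (uint64_t)fa * PERIOD_USEC / rx_usec;

	if (fa_norm > FA_MAX_PER_PERIOD)
		step = 1;   /* too many false alarms: reduce sensitivity */
	else if (fa_norm < FA_MIN_PER_PERIOD)
		step = -1;  /* too few: increase sensitivity */

	s->corr32_x1     = clamp16(s->corr32_x1     + step,  85, 120);
	s->corr32_x1_mrc = clamp16(s->corr32_x1_mrc + step, 170, 210);
	s->corr32_x4     = clamp16(s->corr32_x4     + step, 105, 140);
	s->corr32_x4_mrc = clamp16(s->corr32_x4_mrc + step, 220, 270);
}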
+ * Accumulate (add) the following values over 20 beacons, one accumulator for + * each of the chains a/b/c, from struct statistics_rx_non_phy: + * + * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the weakest background noise level from among a/b/c. This Rx chain + * will be the reference, with 0 gain adjustment. Attenuate other channels by + * finding noise difference: + * + * (accum_noise[i] - accum_noise[reference]) / 30 + * + * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. + * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the + * driver should limit the difference results to a range of 0-3 (0-4.5 dB), + * and set bit 2 to indicate "reduce gain". The value for the reference + * (weakest) chain should be "0". + * + * diff_gain_[abc] bit fields: + * 2: (1) reduce gain, (0) increase gain + * 1-0: amount of gain, units of 1.5 dB + */ + +/* Phy calibration command for series */ +/* The default calibrate table size if not specified by firmware */ +#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +enum { + IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, +}; + +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) + +struct iwl_calib_hdr { + u8 op_code; + u8 first_group; + u8 groups_num; + u8 data_valid; +} __packed; + +/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ +struct iwl_calib_diff_gain_cmd { + struct iwl_calib_hdr hdr; + s8 diff_gain_a; /* see above */ + s8 diff_gain_b; + s8 diff_gain_c; + u8 reserved1; +} __packed; + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. + */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __packed; + + +/****************************************************************************** + * (13) + * Union of all expected notifications/responses: + * + *****************************************************************************/ + +struct iwl_rx_packet { + /* + * The first 4 bytes of the RX frame header contain both the RX frame + * size and some flags. 
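Returning to the REPLY_PHY_CALIBRATION_CMD notes above (the iwl_rx_packet bit-field description resumes below): they prescribe two computations, spotting a disconnected antenna from 20 accumulated beacon RSSI samples and deriving the diff_gain_[abc] codes from accumulated silence RSSI. A compact standalone sketch of both follows, with simplified names and plain integers standing in for the accumulated statistics; it is illustrative only, not the driver's calibration code.

/* Illustrative sketch only -- antenna-disconnect and Rx-balance math per
 * the REPLY_PHY_CALIBRATION_CMD notes above. */
#include <stdint.h>
#include <stdbool.h>

#define NUM_CHAINS   3
#define NUM_BEACONS 20
#define DISCONN_DB  15   /* chain is "disconnected" if 15 dB below the best */

/* accum_rssi[i] = sum over 20 beacons of (beacon_rssi_[abc] & 0x0FF) */
static void find_disconnected(const uint32_t accum_rssi[NUM_CHAINS],
			      bool disconnected[NUM_CHAINS])
{
	uint32_t best = 0;
	int i;

	for (i = 0; i < NUM_CHAINS; i++)
		if (accum_rssi[i] > best)
			best = accum_rssi[i];

	/* 15 dB threshold, scaled by the 20 accumulated samples.
	 * Note: the driver additionally keeps at least one of A/B marked
	 * connected (they are the only Tx chains); omitted here. */
	for (i = 0; i < NUM_CHAINS; i++)
		disconnected[i] =
			(best - accum_rssi[i]) > DISCONN_DB * NUM_BEACONS;
}

/* accum_noise[i] = sum over 20 beacons of (beacon_silence_rssi_[abc] & 0x0FF).
 * Produces the diff_gain code: bit 2 = reduce gain, bits 1:0 = amount in
 * 1.5 dB steps (0..3), relative to the quietest (reference) chain. */
static void compute_diff_gain(const uint32_t accum_noise[NUM_CHAINS],
			      uint8_t diff_gain[NUM_CHAINS])
{
	uint32_t ref = accum_noise[0];
	int i;

	for (i = 1; i < NUM_CHAINS; i++)
		if (accum_noise[i] < ref)
			ref = accum_noise[i];

	for (i = 0; i < NUM_CHAINS; i++) {
		uint32_t delta = (accum_noise[i] - ref) / 30;

		if (delta > 3)
			delta = 3;
		diff_gain[i] = delta ? (uint8_t)(0x4 | delta) : 0;
	}
}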
+ * Bit fields: + * 31: flag flush RB request + * 30: flag ignore TC (terminal counter) request + * 29: flag fast IRQ request + * 28-14: Reserved + * 13-00: RX frame size + */ + __le32 len_n_flags; + struct iwl_cmd_header hdr; + union { + struct iwl3945_rx_frame rx_frame; + struct iwl3945_tx_resp tx_resp; + struct iwl3945_beacon_notif beacon_status; + + struct iwl_alive_resp alive_frame; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_add_sta_resp add_sta; + struct iwl_rem_sta_resp rem_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; + __le32 status; + u8 raw[0]; + } u; +} __packed; + +#endif /* __iwl_legacy_commands_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c new file mode 100644 index 000000000000..c95c3bcb724d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ -0,0 +1,2668 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-power.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" + + +MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); +MODULE_VERSION(IWLWIFI_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). + * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. 
The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +bool bt_coex_active = true; +EXPORT_SYMBOL_GPL(bt_coex_active); +module_param(bt_coex_active, bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); + +u32 iwl_debug_level; +EXPORT_SYMBOL(iwl_debug_level); + +const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +EXPORT_SYMBOL(iwl_bcast_addr); + + +/* This function both allocates and initializes hw and priv. */ +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg) +{ + struct iwl_priv *priv; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), + cfg->ops->ieee80211_ops); + if (hw == NULL) { + pr_err("%s: Can not allocate network device\n", + cfg->name); + goto out; + } + + priv = hw->priv; + priv->hw = hw; + +out: + return hw; +} +EXPORT_SYMBOL(iwl_legacy_alloc_all); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = priv->hw_params.rx_chains_num; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (priv->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (priv->cfg->mod_params->amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains_num - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +int iwl_legacy_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = 
kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_5GHZ); + + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_2GHZ); + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + for (i = 0; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if (!iwl_legacy_is_channel_valid(ch)) + continue; + + if (iwl_legacy_is_channel_a_band(ch)) + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + else + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel, ch->band); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (iwl_legacy_is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > priv->tx_power_device_lmt) + priv->tx_power_device_lmt = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", + ch->channel, geo_ch->center_freq, + iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch->flags & IEEE80211_CHAN_DISABLED ? + "restricted" : "valid", + geo_ch->flags); + } + + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && + priv->cfg->sku & IWL_SKU_A) { + IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" + "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, + priv->pci_dev->subsystem_device); + priv->cfg->sku &= ~IWL_SKU_A; + } + + IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", + priv->bands[IEEE80211_BAND_2GHZ].n_channels, + priv->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_geos); + +/* + * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos + */ +void iwl_legacy_free_geos(struct iwl_priv *priv) +{ + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + clear_bit(STATUS_GEO_CONFIGURED, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_free_geos); + +static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv, + enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) + return false; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40MINUS); + + return false; +} + +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + + /* + * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (ht_cap && !ht_cap->ht_supported) + return false; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + if (priv->disable_ht40) + return false; +#endif + + return iwl_legacy_is_channel_extension(priv, priv->band, + le16_to_cpu(ctx->staging.channel), + ctx->ht.extension_chan_offset); +} +EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed); + +static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device. + */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +int +iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + lockdep_assert_held(&priv->mutex); + + memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(priv->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_window from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_window = 0; + + beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int, + priv->hw_params.max_beacon_itrvl * TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; + + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_window)); + + return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd, + sizeof(ctx->timing), &ctx->timing); +} +EXPORT_SYMBOL(iwl_legacy_send_rxon_timing); + +void +iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto); + +/* validate RXON structure is valid */ +int +iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + bool error = false; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IWL_WARN(priv, "check 2.4G: wrong narrow\n"); + error = true; + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IWL_WARN(priv, "check 2.4G: wrong radar\n"); + error = true; + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "check 5.2G: not short slot!\n"); + error = true; + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IWL_WARN(priv, "check 5.2G: CCK!\n"); + error = true; + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IWL_WARN(priv, "mac/bssid mcast!\n"); + error = true; + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { + IWL_WARN(priv, "neither 1 nor 6 are basic\n"); + error = true; + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IWL_WARN(priv, "aid > 2007\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "CCK and short slot\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IWL_WARN(priv, "CCK and auto detect"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IWL_WARN(priv, "TGg but no auto-detect\n"); + error = true; + } + + if (error) + IWL_WARN(priv, "Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERR(priv, "Invalid RXON\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd); + +/** + * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to 
indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_legacy_rxon_cmd *staging = &ctx->staging; + const struct iwl_legacy_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!iwl_legacy_is_associated_ctx(ctx)); + CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); + CHK(compare_ether_addr(staging->node_addr, active->node_addr)); + CHK(compare_ether_addr(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_full_rxon_required); + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + /* + * Assign the lowest rate -- should really get this from + * the beacon skb from mac80211. 
+ */ + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) + return IWL_RATE_1M_PLCP; + else + return IWL_RATE_6M_PLCP; +} +EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp); + +static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + rxon->flags |= cpu_to_le32(ctx->ht.protection << + RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == + IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* channel location only valid if in Mixed mode */ + IWL_ERR(priv, + "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ctx->ht.protection, + ctx->ht.extension_chan_offset); +} + +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) + _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx); +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_ht); + +/* Return valid, unused, channel for a passive scan to reset the RF */ +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band) +{ + const struct iwl_channel_info *ch_info; + int i; + u8 channel = 0; + u8 min, max; + struct iwl_rxon_context *ctx; + + if (band == IEEE80211_BAND_5GHZ) { + min = 14; + max = priv->channel_count; + } else { + min = 0; + max = 14; + } + + for (i = min; i < max; i++) { + bool busy = false; + + for_each_context(priv, ctx) { + busy = priv->channel_info[i].channel == + le16_to_cpu(ctx->staging.channel); + if (busy) + break; + } + + if (busy) + continue; + + channel = priv->channel_info[i].channel; + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (iwl_legacy_is_channel_valid(ch_info)) + break; + } + + return channel; +} +EXPORT_SYMBOL(iwl_legacy_get_single_channel_number); + +/** + * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + 
* NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the ch->band + */ +int +iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if ((le16_to_cpu(ctx->staging.channel) == channel) && + (priv->band == band)) + return 0; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_channel); + +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_set_flags_for_band); + +/* + * initialize rxon structure with default values from eeprom + */ +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_channel_info *ch_info; + + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_legacy_get_channel_info(priv, priv->band, + le16_to_cpu(ctx->active.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + ctx->staging.channel = cpu_to_le16(ch_info->channel); + priv->band = ch_info->band; + + iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif); + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + ctx->staging.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; +} +EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config); + +void iwl_legacy_set_rate(struct iwl_priv *priv) +{ + const struct 
ieee80211_supported_band *hw = NULL; + struct ieee80211_rate *rate; + struct iwl_rxon_context *ctx; + int i; + + hw = iwl_get_hw_mode(priv, priv->band); + if (!hw) { + IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); + return; + } + + priv->active_rate = 0; + + for (i = 0; i < hw->n_bitrates; i++) { + rate = &(hw->bitrates[i]); + if (rate->hw_value < IWL_RATE_COUNT_LEGACY) + priv->active_rate |= (1 << rate->hw_value); + } + + IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate); + + for_each_context(priv, ctx) { + ctx->staging.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + } +} +EXPORT_SYMBOL(iwl_legacy_set_rate); + +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->switch_rxon.switch_in_progress) { + ieee80211_chswitch_done(ctx->vif, is_success); + mutex_lock(&priv->mutex); + priv->switch_rxon.switch_in_progress = false; + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_legacy_chswitch_done); + +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active; + + if (priv->switch_rxon.switch_in_progress) { + if (!le32_to_cpu(csa->status) && + (csa->channel == priv->switch_rxon.channel)) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, true); + } else { + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, false); + } + } +} +EXPORT_SYMBOL(iwl_legacy_rx_csa); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", + le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", + rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", + le16_to_cpu(rxon->assoc_id)); +} +EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd); +#endif +/** + * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card + */ +void iwl_legacy_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. 
*/ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + IWL_ERR(priv, "Loaded firmware version: %s\n", + priv->hw->wiphy->fw_version); + + priv->cfg->ops->lib->dump_nic_error_log(priv); + if (priv->cfg->ops->lib->dump_fh) + priv->cfg->ops->lib->dump_fh(priv, NULL, false); + priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) + iwl_legacy_print_rx_config_cmd(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(priv, IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (priv->cfg->mod_params->restart_fw) + queue_work(priv->workqueue, &priv->restart); + } +} +EXPORT_SYMBOL(iwl_legacy_irq_handle_error); + +static int iwl_legacy_apm_stop_master(struct iwl_priv *priv) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret) + IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(priv, "stop master\n"); + + return ret; +} + +void iwl_legacy_apm_stop(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); + + /* Stop device's DMA activity */ + iwl_legacy_apm_stop_master(priv); + + /* Reset the entire device */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} +EXPORT_SYMBOL(iwl_legacy_apm_stop); + + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +int iwl_legacy_apm_init(struct iwl_priv *priv) +{ + int ret = 0; + u16 lctl; + + IWL_DEBUG_INFO(priv, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG, + CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + * NOTE: This is no-op for 3945 (non-existant bit) + */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + /* + * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. 
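+ * (Background: L0s and L1 are PCI Express ASPM link power states; L1 saves more power than L0s but has a longer exit latency.)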
+ * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. + */ + if (priv->cfg->base_params->set_l0s) { + lctl = iwl_legacy_pcie_link_ctl(priv); + if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == + PCI_CFG_LINK_CTRL_VAL_L1_EN) { + /* L1-ASPM enabled; disable(!) L0S */ + iwl_legacy_set_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); + } else { + /* L1-ASPM disabled; enable(!) L0S */ + iwl_legacy_clear_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n"); + } + } + + /* Configure analog phase-lock-loop before activating to D0A */ + if (priv->cfg->base_params->pll_cfg_val) + iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG, + priv->cfg->base_params->pll_cfg_val); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_legacy_write_prph() + * and accesses to uCode SRAM. + */ + ret = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + IWL_DEBUG_INFO(priv, "Failed to init the card\n"); + goto out; + } + + /* + * Enable DMA and BSM (if used) clocks, wait for them to stabilize. + * BSM (Boostrap State Machine) is only in 3945 and 4965. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits + * do not disable clocks. This preserves any hardware bits already + * set by default in "CLK_CTRL_REG" after reset. + */ + if (priv->cfg->base_params->use_bsm) + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); + else + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + +out: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_apm_init); + + +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + + lockdep_assert_held(&priv->mutex); + + if (priv->tx_power_user_lmt == tx_power && !force) + return 0; + + if (!priv->cfg->ops->lib->send_tx_power) + return -EOPNOTSUPP; + + if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { + IWL_WARN(priv, + "Requested user TXPOWER %d below lower limit %d.\n", + tx_power, + IWL4965_TX_POWER_TARGET_POWER_MIN); + return -EINVAL; + } + + if (tx_power > priv->tx_power_device_lmt) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->tx_power_device_lmt); + return -EINVAL; + } + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete use tx_power_next, need to be updated */ + priv->tx_power_next = tx_power; + if (test_bit(STATUS_SCANNING, &priv->status) && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n"); + return 0; + } + + prev_tx_power = priv->tx_power_user_lmt; + priv->tx_power_user_lmt = tx_power; + + ret = priv->cfg->ops->lib->send_tx_power(priv); + + /* if fail to set tx_power, restore the orig. 
tx power */ + if (ret) { + priv->tx_power_user_lmt = prev_tx_power; + priv->tx_power_next = prev_tx_power; + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_set_tx_power); + +void iwl_legacy_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + IWL_DEBUG_INFO(priv, "BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); + + if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd)) + IWL_ERR(priv, "failed to send BT Coex Config\n"); +} +EXPORT_SYMBOL(iwl_legacy_send_bt_config); + +int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) +{ + struct iwl_statistics_cmd statistics_cmd = { + .configuration_flags = + clear ? IWL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd, NULL); + else + return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_statistics_request); + +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif); + +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for %s:\n", len, + iwl_legacy_get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif); + +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} +EXPORT_SYMBOL(iwl_legacy_rx_reply_error); + +void iwl_legacy_clear_isr_stats(struct iwl_priv *priv) +{ + memset(&priv->isr_stats, 0, sizeof(priv->isr_stats)); +} + +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx; + unsigned long flags; + int q; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + 
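/* Aside (illustrative numbers only): mac80211 numbers its queues 0..3 with 0 as the highest-priority AC, so q = AC_NUM - 1 - queue mirrors that index into the firmware's ac[] array, e.g. queue 0 -> ac[3] and queue 3 -> ac[0]. The txop handed in by mac80211 is in units of 32 usec, so the "* 32" below suggests edca_txop is programmed in plain microseconds, e.g. txop = 47 -> 47 * 32 = 1504 usec. */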
ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_mac_conf_tx); + +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} +EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon); + +static int +iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_legacy_connection_init_rx_config(priv, ctx); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + return iwl_legacy_commit_rxon(priv, ctx); +} + +static int iwl_legacy_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&priv->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_legacy_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + return 0; +} + +int +iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + vif->type, vif->addr); + + mutex_lock(&priv->mutex); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(vif->type))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + err = iwl_legacy_setup_interface(priv, ctx); + if (!err) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_add_interface); + +static void iwl_legacy_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->mutex); + + if (priv->scan_vif == vif) { + iwl_legacy_scan_cancel_timeout(priv, 200); + iwl_legacy_force_scan_end(priv); + } + + if (!mode_change) { + iwl_legacy_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } +} + +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + WARN_ON(ctx->vif != vif); + ctx->vif = NULL; + + iwl_legacy_teardown_interface(priv, vif, false); + + memset(priv->bssid, 0, ETH_ALEN); 
+ mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} +EXPORT_SYMBOL(iwl_legacy_mac_remove_interface); + +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv) +{ + if (!priv->txq) + priv->txq = kzalloc( + sizeof(struct iwl_tx_queue) * + priv->cfg->base_params->num_of_queues, + GFP_KERNEL); + if (!priv->txq) { + IWL_ERR(priv, "Not enough memory for txq\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem); + +void iwl_legacy_txq_mem(struct iwl_priv *priv) +{ + kfree(priv->txq); + priv->txq = NULL; +} +EXPORT_SYMBOL(iwl_legacy_txq_mem); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + +#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) + +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ + priv->tx_traffic_idx = 0; + priv->rx_traffic_idx = 0; + if (priv->tx_traffic) + memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); + if (priv->rx_traffic) + memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); +} + +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; + + if (iwl_debug_level & IWL_DL_TX) { + if (!priv->tx_traffic) { + priv->tx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->tx_traffic) + return -ENOMEM; + } + } + if (iwl_debug_level & IWL_DL_RX) { + if (!priv->rx_traffic) { + priv->rx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->rx_traffic) + return -ENOMEM; + } + } + iwl_legacy_reset_traffic_log(priv); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem); + +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ + kfree(priv->tx_traffic); + priv->tx_traffic = NULL; + + kfree(priv->rx_traffic); + priv->rx_traffic = NULL; +} +EXPORT_SYMBOL(iwl_legacy_free_traffic_mem); + +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwl_debug_level & IWL_DL_TX))) + return; + + if (!priv->tx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->tx_traffic + + (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->tx_traffic_idx = + (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame); + +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwl_debug_level & IWL_DL_RX))) + return; + + if (!priv->rx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? 
IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->rx_traffic + + (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->rx_traffic_idx = + (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame); + +const char *iwl_legacy_get_mgmt_string(int cmd) +{ + switch (cmd) { + IWL_CMD(MANAGEMENT_ASSOC_REQ); + IWL_CMD(MANAGEMENT_ASSOC_RESP); + IWL_CMD(MANAGEMENT_REASSOC_REQ); + IWL_CMD(MANAGEMENT_REASSOC_RESP); + IWL_CMD(MANAGEMENT_PROBE_REQ); + IWL_CMD(MANAGEMENT_PROBE_RESP); + IWL_CMD(MANAGEMENT_BEACON); + IWL_CMD(MANAGEMENT_ATIM); + IWL_CMD(MANAGEMENT_DISASSOC); + IWL_CMD(MANAGEMENT_AUTH); + IWL_CMD(MANAGEMENT_DEAUTH); + IWL_CMD(MANAGEMENT_ACTION); + default: + return "UNKNOWN"; + + } +} + +const char *iwl_legacy_get_ctrl_string(int cmd) +{ + switch (cmd) { + IWL_CMD(CONTROL_BACK_REQ); + IWL_CMD(CONTROL_BACK); + IWL_CMD(CONTROL_PSPOLL); + IWL_CMD(CONTROL_RTS); + IWL_CMD(CONTROL_CTS); + IWL_CMD(CONTROL_ACK); + IWL_CMD(CONTROL_CFEND); + IWL_CMD(CONTROL_CFENDACK); + default: + return "UNKNOWN"; + + } +} + +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv) +{ + memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); + memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); +} + +/* + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined, + * iwl_legacy_update_stats function will + * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass + * Use debugFs to display the rx/rx_statistics + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL + * information will be recorded, but DATA pkt still will be recorded + * for the reason of iwl_led.c need to control the led blinking based on + * number of tx and rx data. + * + */ +void +iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + stats->mgmt[MANAGEMENT_ASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + stats->mgmt[MANAGEMENT_ASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + stats->mgmt[MANAGEMENT_REASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + stats->mgmt[MANAGEMENT_REASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + stats->mgmt[MANAGEMENT_PROBE_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + stats->mgmt[MANAGEMENT_PROBE_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BEACON): + stats->mgmt[MANAGEMENT_BEACON]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ATIM): + stats->mgmt[MANAGEMENT_ATIM]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + stats->mgmt[MANAGEMENT_DISASSOC]++; + break; + case cpu_to_le16(IEEE80211_STYPE_AUTH): + stats->mgmt[MANAGEMENT_AUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + stats->mgmt[MANAGEMENT_DEAUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACTION): + stats->mgmt[MANAGEMENT_ACTION]++; + break; + } + } else if (ieee80211_is_ctl(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): + stats->ctrl[CONTROL_BACK_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BACK): + stats->ctrl[CONTROL_BACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PSPOLL): + stats->ctrl[CONTROL_PSPOLL]++; + break; + case cpu_to_le16(IEEE80211_STYPE_RTS): + stats->ctrl[CONTROL_RTS]++; + 
break; + case cpu_to_le16(IEEE80211_STYPE_CTS): + stats->ctrl[CONTROL_CTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACK): + stats->ctrl[CONTROL_ACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFEND): + stats->ctrl[CONTROL_CFEND]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFENDACK): + stats->ctrl[CONTROL_CFENDACK]++; + break; + } + } else { + /* data */ + stats->data_cnt++; + stats->data_bytes += len; + } +} +EXPORT_SYMBOL(iwl_legacy_update_stats); +#endif + +static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_legacy_is_any_associated(priv)) { + IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); + return; + } + /* + * There is no easy and better way to force-reset the radio; the only + * known method is switching the channel, which forces the radio to + * reset and retune. + * Use an internal short scan (single channel) operation, which should + * achieve this objective. + * The driver should reset the radio when too many consecutive beacons + * are missed, or any other uCode error condition is detected. + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_legacy_internal_short_hw_scan(priv); +} + + +int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external) +{ + struct iwl_force_reset *force_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + if (mode >= IWL_MAX_FORCE_RESET) { + IWL_DEBUG_INFO(priv, "invalid reset request.\n"); + return -EINVAL; + } + force_reset = &priv->force_reset[mode]; + force_reset->reset_request_count++; + if (!external) { + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } + } + force_reset->reset_success_count++; + force_reset->last_force_reset_jiffies = jiffies; + IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); + switch (mode) { + case IWL_RF_RESET: + _iwl_legacy_force_rf_reset(priv); + break; + case IWL_FW_RESET: + /* + * If the request is external (e.g. debugfs), always perform it + * regardless of the module parameter setting. + * If the request is internal (uCode error or driver-detected + * failure), the fw_restart module parameter needs to be checked + * before performing the firmware reload. + */ + if (!external && !priv->cfg->mod_params->restart_fw) { + IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " + "module parameter setting\n"); + break; + } + IWL_ERR(priv, "On demand firmware reload\n"); + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + /* + * Keep the restart process from trying to send host + * commands by clearing the INIT status bit + */ + clear_bit(STATUS_READY, &priv->status); + queue_work(priv->workqueue, &priv->restart); + break; + } + return 0; +} + +int +iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + struct iwl_rxon_context *tmp; + u32 interface_modes; + int err; + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->mutex); + + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + + if (!(interface_modes & BIT(newtype))) { + err = 
-EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->vif) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_legacy_teardown_interface(priv, vif, true); + vif->type = newtype; + err = iwl_legacy_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. + */ + err = 0; + + out: + mutex_unlock(&priv->mutex); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_change_interface); + +/* + * On every watchdog tick we check the (latest) time stamp. If it has not + * changed during the timeout period and the queue is not empty, we reset + * the firmware. + */ +static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt) +{ + struct iwl_tx_queue *txq = &priv->txq[cnt]; + struct iwl_queue *q = &txq->q; + unsigned long timeout; + int ret; + + if (q->read_ptr == q->write_ptr) { + txq->time_stamp = jiffies; + return 0; + } + + timeout = txq->time_stamp + + msecs_to_jiffies(priv->cfg->base_params->wd_timeout); + + if (time_after(jiffies, timeout)) { + IWL_ERR(priv, "Queue %d stuck for %u ms.\n", + q->id, priv->cfg->base_params->wd_timeout); + ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false); + return (ret == -EAGAIN) ? 0 : 1; + } + + return 0; +} + +/* + * Making the watchdog tick a quarter of the timeout assures we will + * discover a hung queue between timeout and 1.25*timeout. + */ +#define IWL_WD_TICK(timeout) ((timeout) / 4) + +/* + * Watchdog timer callback: we check each tx queue for a stuck condition; + * if a queue is hung we reset the firmware. If everything is fine, just + * rearm the timer. 
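 * (Aside, with illustrative numbers: the quarter-timeout tick means a queue that stalls right after being checked is caught at the first tick past time_stamp + timeout; e.g. wd_timeout = 2000 ms gives a 500 ms tick, so the time_after() test in iwl_legacy_check_stuck_queue() trips at most 2000 + 500 = 2500 ms, i.e. 1.25 * timeout, after the last activity.)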
+ */ +void iwl_legacy_bg_watchdog(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + int cnt; + unsigned long timeout; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + timeout = priv->cfg->base_params->wd_timeout; + if (timeout == 0) + return; + + /* monitor and check for stuck cmd queue */ + if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue)) + return; + + /* monitor and check for other stuck queues */ + if (iwl_legacy_is_any_associated(priv)) { + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + /* skip as we already checked the command queue */ + if (cnt == priv->cmd_queue) + continue; + if (iwl_legacy_check_stuck_queue(priv, cnt)) + return; + } + } + + mod_timer(&priv->watchdog, jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); +} +EXPORT_SYMBOL(iwl_legacy_bg_watchdog); + +void iwl_legacy_setup_watchdog(struct iwl_priv *priv) +{ + unsigned int timeout = priv->cfg->base_params->wd_timeout; + + if (timeout) + mod_timer(&priv->watchdog, + jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); +} +EXPORT_SYMBOL(iwl_legacy_setup_watchdog); + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +u32 +iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits) >> + priv->hw_params.beacon_time_tsf_bits); + rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + + return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; +} +EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons); + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)) + + (addon & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << priv->hw_params.beacon_time_tsf_bits); + } else + res += (1 << priv->hw_params.beacon_time_tsf_bits); + + return cpu_to_le32(res); +} +EXPORT_SYMBOL(iwl_legacy_add_beacon_time); + +#ifdef CONFIG_PM + +int iwl_legacy_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + + /* + * This function is called when system goes into suspend state + * mac80211 will call iwl_mac_stop() from the mac80211 suspend function + * first but since iwl_mac_stop() has no knowledge of who the caller is, + * it will not call apm_ops.stop() to stop the DMA operation. + * Calling apm_ops.stop here to make sure we stop the DMA. 
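 * (The "apm_ops.stop" mentioned above corresponds here to the direct call to iwl_legacy_apm_stop() just below: it halts bus-master DMA, asserts a software reset and clears the "initialization complete" bit so the NIC drops back to its low-power D0U state.)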
+ */ + iwl_legacy_apm_stop(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_suspend); + +int iwl_legacy_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + bool hw_rfkill = false; + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl_legacy_enable_interrupts(priv); + + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rfkill = true; + + if (hw_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_resume); + +const struct dev_pm_ops iwl_legacy_pm_ops = { + .suspend = iwl_legacy_pci_suspend, + .resume = iwl_legacy_pci_resume, + .freeze = iwl_legacy_pci_suspend, + .thaw = iwl_legacy_pci_resume, + .poweroff = iwl_legacy_pci_suspend, + .restore = iwl_legacy_pci_resume, +}; +EXPORT_SYMBOL(iwl_legacy_pm_ops); + +#endif /* CONFIG_PM */ + +static void +iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!ctx->is_active) + return; + + ctx->qos_data.def_qos_parm.qos_flags = 0; + + if (ctx->qos_data.qos_active) + ctx->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (ctx->ht.enabled) + ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + ctx->qos_data.qos_active, + ctx->qos_data.def_qos_parm.qos_flags); + + iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd, + sizeof(struct iwl_qosparam_cmd), + &ctx->qos_data.def_qos_parm, NULL); +} + +/** + * iwl_legacy_mac_config - mac80211 config callback + */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = conf->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct iwl_rxon_context *ctx; + unsigned long flags = 0; + int ret = 0; + u16 ch; + int scan_active = 0; + bool ht_changed[NUM_IWL_RXON_CTX] = {}; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return -EOPNOTSUPP; + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", + channel->hw_value, changed); + + if (unlikely(!priv->cfg->mod_params->disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + scan_active = 1; + IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); + } + + if (changed & (IEEE80211_CONF_CHANGE_SMPS | + IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + priv->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. 
+ */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + for_each_context(priv, ctx) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (scan_active) + goto set_ch_out; + + ch = channel->hw_value; + ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + /* Configure HT40 channels */ + if (ctx->ht.enabled != conf_is_ht(conf)) { + ctx->ht.enabled = conf_is_ht(conf); + ht_changed[ctx->ctxid] = true; + } + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + /* + * Default to no protection. Protection mode will + * later be set from BSS config in iwl_ht_conf + */ + ctx->ht.protection = + IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->cfg->ops->legacy->update_bcast_stations) + ret = + priv->cfg->ops->legacy->update_bcast_stations(priv); + + set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_legacy_set_rate(priv); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_IDLE)) { + ret = iwl_legacy_power_update_mode(priv, false); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); + + iwl_legacy_set_tx_power(priv, conf->power_level, false); + } + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; + } + + if (scan_active) + goto out; + + for_each_context(priv, ctx) { + if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) + iwl_legacy_commit_rxon(priv, ctx); + else + IWL_DEBUG_INFO(priv, + "Not re-sending same RXON configuration.\n"); + if (ht_changed[ctx->ctxid]) + iwl_legacy_update_qos(priv, ctx); + } + +out: + IWL_DEBUG_MAC80211(priv, "leave\n"); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_config); + +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + /* IBSS can only be the IWL_RXON_CTX_BSS context */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + 
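/* As the comments further down note, the association process is being restarted here, so the cached HT configuration is cleared; a fresh one is built up again as the new association's BSS configuration arrives. */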
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); + spin_unlock_irqrestore(&priv->lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + + /* new association get rid of ibss beacon skb */ + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = NULL; + + priv->timestamp = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_legacy_scan_cancel_timeout(priv, 100); + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* we are restarting association process + * clear RXON_FILTER_ASSOC_MSK bit + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + iwl_legacy_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf); + +static void iwl_legacy_ht_conf(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta *sta; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_ASSOC(priv, "enter:\n"); + + if (!ctx->ht.enabled) + return; + + ctx->ht.protection = + bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; + ctx->ht.non_gf_sta_present = + !!(bss_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + ht_conf->single_chain_sufficient = false; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int maxstreams; + + maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if ((ht_cap->mcs.rx_mask[1] == 0) && + (ht_cap->mcs.rx_mask[2] == 0)) + ht_conf->single_chain_sufficient = true; + if (maxstreams <= 1) + ht_conf->single_chain_sufficient = true; + } else { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. 
+ */ + ht_conf->single_chain_sufficient = true; + } + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + ht_conf->single_chain_sufficient = true; + break; + default: + break; + } + + IWL_DEBUG_ASSOC(priv, "leave\n"); +} + +static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + /* + * inform the ucode that there is no longer an + * association and that no more packets should be + * sent + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ctx->staging.assoc_id = 0; + iwl_legacy_commit_rxon(priv, ctx); +} + +static void iwl_legacy_beacon_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + __le64 timestamp; + struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + + if (!skb) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "update beacon but no beacon context!\n"); + dev_kfree_skb(skb); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = skb; + + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + priv->timestamp = le64_to_cpu(timestamp); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return; + } + + priv->cfg->ops->legacy->post_associate(priv); +} + +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + int ret; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); + + if (!iwl_legacy_is_alive(priv)) + return; + + mutex_lock(&priv->mutex); + + if (changes & BSS_CHANGED_QOS) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ctx->qos_data.qos_active = bss_conf->qos; + iwl_legacy_update_qos(priv, ctx); + spin_unlock_irqrestore(&priv->lock, flags); + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + /* + * the add_interface code must make sure we only ever + * have a single interface that could be beaconing at + * any time. + */ + if (vif->bss_conf.enable_beacon) + priv->beacon_ctx = ctx; + else + priv->beacon_ctx = NULL; + } + + if (changes & BSS_CHANGED_BSSID) { + IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) { + IWL_WARN(priv, + "Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211(priv, + "leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* mac80211 only sets assoc when in STATION mode */ + if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + + /* currently needed in a few places */ + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + } else { + ctx->staging.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; + } + + } + + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. 
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) + iwl_legacy_beacon_update(hw, vif); + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + } + + if (changes & BSS_CHANGED_ERP_CTS_PROT) { + IWL_DEBUG_MAC80211(priv, + "ERP_CTS %d\n", bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && + (priv->band != IEEE80211_BAND_5GHZ)) + ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + } + + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from iwl_legacy_set_rate() and put something + * like this here: + * + if (A-band) + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates; + else + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + ctx->staging.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + + if (changes & BSS_CHANGED_HT) { + iwl_legacy_ht_conf(priv, vif); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + if (changes & BSS_CHANGED_ASSOC) { + IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); + if (bss_conf->assoc) { + priv->timestamp = bss_conf->timestamp; + + if (!iwl_legacy_is_rfkill(priv)) + priv->cfg->ops->legacy->post_associate(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) { + IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", + changes); + ret = iwl_legacy_send_rxon_assoc(priv, ctx); + if (!ret) { + /* Sync active_rxon with latest change. */ + memcpy((void *)&ctx->active, + &ctx->staging, + sizeof(struct iwl_legacy_rxon_cmd)); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (vif->bss_conf.enable_beacon) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + priv->cfg->ops->legacy->config_ap(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes & BSS_CHANGED_IBSS) { + ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif, + bss_conf->ibss_joined); + if (ret) + IWL_ERR(priv, "failed to %s IBSS station %pM\n", + bss_conf->ibss_joined ? "add" : "remove", + bss_conf->bssid); + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed); + +irqreturn_t iwl_legacy_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 inta_fh; + unsigned long flags; + if (!priv) + return IRQ_NONE; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. 
+ * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR(priv, + "Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared. It might have already raised + * an interrupt */ + IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto unplugged; + } + + IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + inta &= ~CSR_INT_BIT_SCD; + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta || inta_fh)) + tasklet_schedule(&priv->irq_tasklet); + +unplugged: + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; + +none: + /* re-enable interrupts here since we don't have anything to service. */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; +} +EXPORT_SYMBOL(iwl_legacy_isr); + +/* + * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this + * function. + */ +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + *tx_flags |= TX_CMD_FLG_RTS_MSK; + *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + if (!ieee80211_is_mgmt(fc)) + return; + + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + break; + } + } else if (info->control.rates[0].flags & + IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection); diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h new file mode 100644 index 000000000000..1159b0d255b8 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-core.h @@ -0,0 +1,646 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_legacy_core_h__ +#define __iwl_legacy_core_h__ + +/************************ + * forward declarations * + ************************/ +struct iwl_host_cmd; +struct iwl_cmd; + + +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +#define TIME_UNIT 1024 + +#define IWL_SKU_G 0x1 +#define IWL_SKU_A 0x2 +#define IWL_SKU_N 0x8 + +#define IWL_CMD(x) case x: return #x + +struct iwl_hcmd_ops { + int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + void (*set_rxon_chain)(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +}; + +struct iwl_hcmd_utils_ops { + u16 (*get_hcmd_size)(u8 cmd_id, u16 len); + u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data); + int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); + void (*post_scan)(struct iwl_priv *priv); +}; + +struct iwl_apm_ops { + int (*init)(struct iwl_priv *priv); + void (*config)(struct iwl_priv *priv); +}; + +struct iwl_debugfs_ops { + ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +}; + +struct iwl_temp_ops { + void (*temperature)(struct iwl_priv *priv); +}; + +struct iwl_lib_ops { + /* set hw dependent parameters */ + int (*set_hw_params)(struct iwl_priv *priv); + /* Handling TX */ + void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); + int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, + u16 len, u8 reset, u8 pad); + void (*txq_free_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + int (*txq_init)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + /* setup Rx handler */ + void (*rx_handler_setup)(struct iwl_priv *priv); + /* alive notification after init uCode load */ + void (*init_alive_start)(struct iwl_priv *priv); + /* check validity of rtc data address */ + int (*is_valid_rtc_data_addr)(u32 addr); + /* 1st ucode load */ + int (*load_ucode)(struct iwl_priv *priv); + int (*dump_nic_event_log)(struct iwl_priv *priv, + bool full_log, char **buf, bool display); + void (*dump_nic_error_log)(struct iwl_priv *priv); + int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); + /* power management */ + struct iwl_apm_ops apm_ops; + + /* power */ + int (*send_tx_power) (struct iwl_priv *priv); + void (*update_chain_flags)(struct iwl_priv *priv); + + /* eeprom operations (as defined in iwl-eeprom.h) */ + struct iwl_eeprom_ops eeprom_ops; + + /* temperature */ + struct iwl_temp_ops temp_ops; + /* check for plcp health */ + bool (*check_plcp_health)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + + struct iwl_debugfs_ops debugfs_ops; + +}; + +struct iwl_led_ops { + int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); +}; + +struct iwl_legacy_ops { + void 
(*post_associate)(struct iwl_priv *priv); + void (*config_ap)(struct iwl_priv *priv); + /* station management */ + int (*update_bcast_stations)(struct iwl_priv *priv); + int (*manage_ibss_station)(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); +}; + +struct iwl_ops { + const struct iwl_lib_ops *lib; + const struct iwl_hcmd_ops *hcmd; + const struct iwl_hcmd_utils_ops *utils; + const struct iwl_led_ops *led; + const struct iwl_nic_ops *nic; + const struct iwl_legacy_ops *legacy; + const struct ieee80211_ops *ieee80211_ops; +}; + +struct iwl_mod_params { + int sw_crypto; /* def: 0 = using hardware encryption */ + int disable_hw_scan; /* def: 0 = use h/w scan */ + int num_of_queues; /* def: HW dependent */ + int disable_11n; /* def: 0 = 11n capabilities enabled */ + int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ + int antenna; /* def: 0 = both antennas (use diversity) */ + int restart_fw; /* def: 1 = restart firmware */ +}; + +/* + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in iwl-led.c + * @chain_noise_num_beacons: number of beacons used to compute chain noise + * @plcp_delta_threshold: plcp error rate threshold used to trigger + * radio tuning when there is a high receiving plcp error rate + * @wd_timeout: TX queues watchdog timeout + * @temperature_kelvin: temperature report by uCode in kelvin + * @max_event_log_size: size of event log buffer size for ucode event logging + * @ucode_tracing: support ucode continuous tracing + * @sensitivity_calib_by_driver: driver has the capability to perform + * sensitivity calibration operation + * @chain_noise_calib_by_driver: driver has the capability to perform + * chain noise calibration operation + */ +struct iwl_base_params { + int eeprom_size; + int num_of_queues; /* def: HW dependent */ + int num_of_ampdu_queues;/* def: HW dependent */ + /* for iwl_legacy_apm_init() */ + u32 pll_cfg_val; + bool set_l0s; + bool use_bsm; + + u16 led_compensation; + int chain_noise_num_beacons; + u8 plcp_delta_threshold; + unsigned int wd_timeout; + bool temperature_kelvin; + u32 max_event_log_size; + const bool ucode_tracing; + const bool sensitivity_calib_by_driver; + const bool chain_noise_calib_by_driver; +}; + +/** + * struct iwl_cfg + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre<api>.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @scan_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. The firmware's API version will be + * stored in @iwl_priv, enabling the driver to make runtime changes based + * on firmware version used. + * + * For example, + * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + * Driver interacts with Firmware API version >= 2. + * } else { + * Driver interacts with Firmware API version 1. + * } + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. 
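 * (Illustrative values: with fw_name_pre "iwlwifi-4965-" and a requested API version of 2, the image asked for would be "iwlwifi-4965-2.ucode", following the fw_name_pre<api>.ucode rule above.)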
That is, through utilizing the + * iwl_hcmd_utils_ops etc. we accommodate different command structures + * and flows between hardware versions as well as their API + * versions. + * + */ +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + unsigned int sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct iwl_ops *ops; + /* module based parameters which can be set from modprobe cmd */ + const struct iwl_mod_params *mod_params; + /* params not likely to change within a device family */ + struct iwl_base_params *base_params; + /* params likely to change within a device family */ + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; + enum iwl_led_mode led_mode; +}; + +/*************************** + * L i b * + ***************************/ + +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg); +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw); +void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt); +int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_set_rxon_channel(struct iwl_priv *priv, + struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif); +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band); +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf); +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap); +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_rate(struct iwl_priv *priv); +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +void iwl_legacy_irq_handle_error(struct iwl_priv *priv); +int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p); +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv); +void iwl_legacy_txq_mem(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv); +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +const char *iwl_legacy_get_mgmt_string(int cmd); +const char *iwl_legacy_get_ctrl_string(int cmd); +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv); +void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, + u16 len); +#else 
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + return 0; +} +static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, + __le16 fc, u16 len) +{ +} +#endif +/***************************************************** + * RX handlers. + * **************************************************/ +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/***************************************************** +* RX +******************************************************/ +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv); +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv); +void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q); +void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +/* Handlers */ +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success); +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + +/* TX helpers */ + +/***************************************************** +* TX +******************************************************/ +void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id); +void iwl_legacy_setup_watchdog(struct iwl_priv *priv); +/***************************************************** + * TX power + ****************************************************/ +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); + +/******************************************************************************* + * Rate + ******************************************************************************/ + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/******************************************************************************* + * Scanning + ******************************************************************************/ +void iwl_legacy_init_scan_params(struct iwl_priv *priv); +int iwl_legacy_scan_cancel(struct iwl_priv *priv); +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +void iwl_legacy_force_scan_end(struct iwl_priv *priv); +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req); +void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv); +int iwl_legacy_force_reset(struct 
iwl_priv *priv, int mode, bool external); +u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ie, int ie_len, int left); +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv); +u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes); +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif); +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv); +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) + +/***************************************************** + * S e n d i n g H o s t C o m m a n d s * + *****************************************************/ + +const char *iwl_legacy_get_cmd_string(u8 cmd); +int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u16 len, const void *data); +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, + const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)); + +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); + + +/***************************************************** + * PCI * + *****************************************************/ + +static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv) +{ + int pos; + u16 pci_lnk_ctl; + pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP); + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} + +void iwl_legacy_bg_watchdog(unsigned long data); +u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval); +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval); + +#ifdef CONFIG_PM +int iwl_legacy_pci_suspend(struct device *device); +int iwl_legacy_pci_resume(struct device *device); +extern const struct dev_pm_ops iwl_legacy_pm_ops; + +#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops) + +#else /* !CONFIG_PM */ + +#define IWL_LEGACY_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +/***************************************************** +* Error Handling Debugging +******************************************************/ +void iwl4965_dump_nic_error_log(struct iwl_priv *priv); +int iwl4965_dump_nic_event_log(struct iwl_priv *priv, + bool full_log, char **buf, bool display); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +#else +static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ +} +#endif + +void iwl_legacy_clear_isr_stats(struct iwl_priv *priv); + +/***************************************************** +* GEOS 
+******************************************************/ +int iwl_legacy_init_geos(struct iwl_priv *priv); +void iwl_legacy_free_geos(struct iwl_priv *priv); + +/*************** DRIVER STATUS FUNCTIONS *****/ + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ +#define STATUS_INT_ENABLED 2 +#define STATUS_RF_KILL_HW 3 +#define STATUS_CT_KILL 4 +#define STATUS_INIT 5 +#define STATUS_ALIVE 6 +#define STATUS_READY 7 +#define STATUS_TEMPERATURE 8 +#define STATUS_GEO_CONFIGURED 9 +#define STATUS_EXIT_PENDING 10 +#define STATUS_STATISTICS 12 +#define STATUS_SCANNING 13 +#define STATUS_SCAN_ABORTING 14 +#define STATUS_SCAN_HW 15 +#define STATUS_POWER_PMI 16 +#define STATUS_FW_ERROR 17 + + +static inline int iwl_legacy_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_legacy_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_legacy_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status); +} + +static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv) +{ + return iwl_legacy_is_rfkill_hw(priv); +} + +static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_CT_KILL, &priv->status); +} + +static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_legacy_is_rfkill(priv)) + return 0; + + return iwl_legacy_is_ready(priv); +} + +extern void iwl_legacy_send_bt_config(struct iwl_priv *priv); +extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); +void iwl_legacy_apm_stop(struct iwl_priv *priv); +int iwl_legacy_apm_init(struct iwl_priv *priv); + +int iwl_legacy_send_rxon_timing(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx); +} +static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); +} +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} + +extern bool bt_coex_active; + +/* mac80211 handlers */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw); +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); + +irqreturn_t iwl_legacy_isr(int irq, void *data); + +#endif /* __iwl_legacy_core_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h new file mode 100644 index 000000000000..668a9616c269 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-csr.h @@ -0,0 +1,422 @@ +/****************************************************************************** + * + * This file is provided under a 
dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_legacy_csr_h__ +#define __iwl_legacy_csr_h__ +/* + * CSR (control and status registers) + * + * CSR registers are mapped directly into PCI bus space, and are accessible + * whenever platform supplies power to device, even when device is in + * low power states due to driver-invoked device resets + * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. + * + * Use iwl_write32() and iwl_read32() family to access these registers; + * these provide simple PCI bus access, without waking up the MAC. + * Do not use iwl_legacy_write_direct32() family for these registers; + * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. 
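 *
 * [Editor's illustrative aside -- not part of the original patch.]
 * A minimal sketch of the access rule above, using the iwl_read32()
 * family mentioned in this comment: a CSR read needs no wakeup
 * handshake, e.g.
 *
 *	u32 hw_rev = iwl_read32(priv, CSR_HW_REV);
 *	u32 step   = (hw_rev >> 2) & 0x3;
 *	u32 dash   = hw_rev & 0x3;
 *
 * where "step" and "dash" follow the CSR_HW_REV bit layout documented
 * below. HBUS registers, documented at the end of this file, are the
 * opposite case and require the MAC access request handshake first.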
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing + * the CSR registers. + * + * NOTE: Device does need to be awake in order to read this memory + * via CSR_EEPROM register + */ +#define CSR_BASE (0x000) + +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) + +/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ +#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) + +/* + * Hardware revision info + * Bit fields: + * 31-8: Reserved + * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions + * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D + * 1-0: "Dash" (-) value, as in A-1, etc. + * + * NOTE: Revision step affects calculation of CCK txpower for 4965. + * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965). + */ +#define CSR_HW_REV (CSR_BASE+0x028) + +/* + * EEPROM memory reads + * + * NOTE: Device must be awake, initialized via apm_ops.init(), + * in order to read. + */ +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) + +#define CSR_GIO_REG (CSR_BASE+0x03C) +#define CSR_GP_UCODE_REG (CSR_BASE+0x048) +#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) + +/* + * UCODE-DRIVER GP (general purpose) mailbox registers. + * SET/CLR registers set/clear bit(s) if "1" is written. + */ +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) + +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) + +/* Analog phase-lock-loop configuration */ +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) + +/* + * CSR Hardware Revision Workaround Register. Indicates hardware rev; + * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * See also CSR_HW_REV register. + * Bit fields: + * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step + * 1-0: "Dash" (-) value, as in C-1, etc. 
+ */ +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) +#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) + +/* Bits for CSR_HW_IF_CONFIG_REG */ +#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) + +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) +#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400) +#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) + +#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ + +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. */ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ + +#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR39_FH_INT_BIT_RX_CHNL2 | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + + +#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ + CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define 
CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) +#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) + +/* + * GP (general purpose) CONTROL REGISTER + * Bit fields: + * 27: HW_RF_KILL_SW + * Indicates state of (platform's) hardware RF-Kill switch + * 26-24: POWER_SAVE_TYPE + * Indicates current power-saving mode: + * 000 -- No power saving + * 001 -- MAC power-down + * 010 -- PHY (radio) power-down + * 011 -- Error + * 9-6: SYS_CONFIG + * Indicates current system configuration, reflecting pins on chip + * as forced high/low by device circuit board. + * 4: GOING_TO_SLEEP + * Indicates MAC is entering a power-saving sleep power-down. + * Not a good time to access device-internal resources. + * 3: MAC_ACCESS_REQ + * Host sets this to request and maintain MAC wakeup, to allow host + * access to device-internal resources. Host must wait for + * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR + * device registers. + * 2: INIT_DONE + * Host sets this to put device into fully operational D0 power mode. + * Host resets this after SW_RESET to put device into low power mode. + * 0: MAC_CLOCK_READY + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. + * NOTE: This does not indicate that 4965 or 3945 has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) +#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) +#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) + +/* GP REG */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) +#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) +#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) +#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) + + +/* CSR GIO */ +#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) + +/* + * UCODE-DRIVER GP (general purpose) mailbox register 1 + * Host driver and uCode write and/or read this register to communicate with + * each other. 
+ * Bit fields: + * 4: UCODE_DISABLE + * Host sets this to request permanent halt of uCode, same as + * sending CARD_STATE command with "halt" bit set. + * 3: CT_KILL_EXIT + * Host sets this to request exit from CT_KILL state, i.e. host thinks + * device temperature is low enough to continue normal operation. + * 2: CMD_BLOCKED + * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) + * to release uCode to clear all Tx and command queues, enter + * unassociated mode, and power down. + * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. + * 1: SW_BIT_RFKILL + * Host sets this when issuing CARD_STATE command to request + * device sleep. + * 0: MAC_SLEEP + * uCode sets this when preparing a power-saving power-down. + * uCode resets this when power-up is complete and SRAM is sane. + * NOTE: 3945/4965 saves internal SRAM data to host when powering down, + * and must restore this data after powering back up. + * MAC_SLEEP is the best indication that restore is complete. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* LED */ +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) +#define CSR_LED_REG_TRUN_ON (0x78) +#define CSR_LED_REG_TRUN_OFF (0x38) + +/* ANA_PLL */ +#define CSR39_ANA_PLL_CFG_VAL (0x01000000) + +/* HPET MEM debug */ +#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) + +/* DRAM INT TABLE */ +#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) + +/* + * HBUS (Host-side Bus) + * + * HBUS registers are mapped directly into PCI bus space, but are used + * to indirectly access device's internal memory or registers that + * may be powered-down. + * + * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family + * for these registers; + * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ + * to make sure the MAC (uCode processor, etc.) is powered up for accessing + * internal resources. + * + * Do not use iwl_write32()/iwl_read32() family to access these registers; + * these provide only simple PCI bus access, without waking up the MAC. + */ +#define HBUS_BASE (0x400) + +/* + * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM + * structures, error log, event log, verifying uCode load). + * First write to address register, then read from or write to data register + * to complete the job. Once the address register is set up, accesses to + * data registers auto-increment the address by one dword. + * Bit usage for address registers (read or write): + * 0-31: memory address within device + */ +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) + +/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +/* + * Registers for accessing device's internal peripheral registers + * (e.g. SCD, BSM, etc.). 
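 *
 * [Editor's illustrative aside -- not part of the original patch.]
 * Putting the rules above together, a raw dump of n_words dwords of
 * device SRAM starting at src_addr looks roughly like this (the iwl-io.h
 * helpers such as iwl_legacy_read_targ_mem() wrap this sequence; the
 * sketch assumes the direct accessors perform the MAC access handshake):
 *
 *	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, src_addr);
 *	for (i = 0; i < n_words; i++)
 *		buf[i] = iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
 *
 * Each read of HBUS_TARG_MEM_RDAT advances the address by one dword, as
 * described above, so only the initial address write is needed.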
First write to address register, + * then read from or write to data register to complete the job. + * Bit usage for address registers (read or write): + * 0-15: register address (offset) within device + * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) + */ +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) + +/* + * Per-Tx-queue write pointer (index, really!) + * Indicates index to next TFD that driver will fill (1 past latest filled). + * Bit usage: + * 0-7: queue write index + * 11-8: queue selector + */ +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +#endif /* !__iwl_legacy_csr_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h new file mode 100644 index 000000000000..665789f3e75d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-debug.h @@ -0,0 +1,198 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_debug_h__ +#define __iwl_legacy_debug_h__ + +struct iwl_priv; +extern u32 iwl_debug_level; + +#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) +#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) +#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) +#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) + +#define iwl_print_hex_error(priv, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define IWL_DEBUG(__priv, level, fmt, args...) \ +do { \ + if (iwl_legacy_get_debug_level(__priv) & (level)) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ +do { \ + if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 
'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define iwl_print_hex_dump(priv, level, p, len) \ +do { \ + if (iwl_legacy_get_debug_level(priv) & level) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#else +#define IWL_DEBUG(__priv, level, fmt, args...) +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) +static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + const void *p, u32 len) +{} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name); +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv); +#else +static inline int +iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + return 0; +} +static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IWL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwl4965/parameters/debug{50} + * /sys/module/iwl3945/parameters/debug + * /sys/class/net/wlan0/device/debug_level + * + * when CONFIG_IWLWIFI_LEGACY_DEBUG=y. + */ + +/* 0x0000000F - 0x00000001 */ +#define IWL_DL_INFO (1 << 0) +#define IWL_DL_MAC80211 (1 << 1) +#define IWL_DL_HCMD (1 << 2) +#define IWL_DL_STATE (1 << 3) +/* 0x000000F0 - 0x00000010 */ +#define IWL_DL_MACDUMP (1 << 4) +#define IWL_DL_HCMD_DUMP (1 << 5) +#define IWL_DL_EEPROM (1 << 6) +#define IWL_DL_RADIO (1 << 7) +/* 0x00000F00 - 0x00000100 */ +#define IWL_DL_POWER (1 << 8) +#define IWL_DL_TEMP (1 << 9) +#define IWL_DL_NOTIF (1 << 10) +#define IWL_DL_SCAN (1 << 11) +/* 0x0000F000 - 0x00001000 */ +#define IWL_DL_ASSOC (1 << 12) +#define IWL_DL_DROP (1 << 13) +#define IWL_DL_TXPOWER (1 << 14) +#define IWL_DL_AP (1 << 15) +/* 0x000F0000 - 0x00010000 */ +#define IWL_DL_FW (1 << 16) +#define IWL_DL_RF_KILL (1 << 17) +#define IWL_DL_FW_ERRORS (1 << 18) +#define IWL_DL_LED (1 << 19) +/* 0x00F00000 - 0x00100000 */ +#define IWL_DL_RATE (1 << 20) +#define IWL_DL_CALIB (1 << 21) +#define IWL_DL_WEP (1 << 22) +#define IWL_DL_TX (1 << 23) +/* 0x0F000000 - 0x01000000 */ +#define IWL_DL_RX (1 << 24) +#define IWL_DL_ISR (1 << 25) +#define IWL_DL_HT (1 << 26) +#define IWL_DL_IO (1 << 27) +/* 0xF0000000 - 0x10000000 */ +#define IWL_DL_11H (1 << 28) +#define IWL_DL_STATS (1 << 29) +#define IWL_DL_TX_REPLY (1 << 30) +#define IWL_DL_QOS (1 << 31) + +#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) +#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(p, f, a...) 
IWL_DEBUG(p, IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) +#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) +#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(p, f, a...) \ + IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c new file mode 100644 index 000000000000..6ea9c4fbcd3a --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c @@ -0,0 +1,1467 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include <linux/ieee80211.h> +#include <net/mac80211.h> + + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_legacy_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + + +static int +iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .write = iwl_legacy_dbgfs_##name##_write, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .write = iwl_legacy_dbgfs_##name##_write, \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->tx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->tx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->tx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->tx_stats.data_bytes); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t 
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 clear_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &clear_flag) != 1) + return -EFAULT; + iwl_legacy_clear_traffic_stats(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->rx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->rx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->rx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->rx_stats.data_bytes); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; +static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + u32 val; + char *buf; + ssize_t ret; + int i; + int pos = 0; + struct iwl_priv *priv = file->private_data; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (priv->ucode_type == UCODE_INIT) + priv->dbgfs_sram_len = priv->ucode_init_data.len; + else + priv->dbgfs_sram_len = priv->ucode_data.len; + } + bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + priv->dbgfs_sram_len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { + val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \ + priv->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } + } + if (!(i % 16)) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, 
buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + int max_sta = priv->hw_params.max_stations; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < max_sta; i++) { + station = &priv->stations[i]; + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID\tseq_num\ttxq_id\tframes\ttfds\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap\t\t\trate_n_flags\n"); + + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += scnprintf(buf + pos, bufsz - pos, + "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", + j, station->tid[j].seq_number, + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].tfds_in_queue, + station->tid[j].agg.start_idx, + station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + + if (station->tid[j].agg.wait_for_ba) + pos += scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = priv->cfg->base_params->eeprom_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) { + IWL_ERR(priv, "NVM size is not multiple of 16.\n"); + return -ENODATA; + } + + ptr = priv->eeprom; + if (!ptr) { + IWL_ERR(priv, "Invalid EEPROM memory\n"); + return -ENOMEM; + } + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " + "version: 0x%x\n", eeprom_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; + + ret = pos = priv->cfg->ops->lib->dump_nic_event_log( + priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + 
kfree(buf); + } + return ret; +} + +static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + priv->cfg->ops->lib->dump_nic_event_log(priv, true, + NULL, false); + + return count; +} + + + +static ssize_t +iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", + test_bit(STATUS_INT_ENABLED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", + test_bit(STATUS_INIT, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", + test_bit(STATUS_TEMPERATURE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", + test_bit(STATUS_GEO_CONFIGURED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + priv->isr_stats.hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + priv->isr_stats.sw); + if (priv->isr_stats.sw || priv->isr_stats.hw) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + priv->isr_stats.err_code); + } +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + priv->isr_stats.sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + priv->isr_stats.alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", + priv->isr_stats.rfkill); + + pos += scnprintf(buf + pos, 
bufsz - pos, "CT KILL:\t\t\t %u\n", + priv->isr_stats.ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + priv->isr_stats.wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", + priv->isr_stats.rx); + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->isr_stats.rx_handlers[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + iwl_legacy_get_cmd_string(cnt), + priv->isr_stats.rx_handlers[cnt]); + } + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + priv->isr_stats.tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + priv->isr_stats.unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + iwl_legacy_clear_isr_stats(priv); + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_rxon_context *ctx; + int pos = 0, i; + char buf[256 * NUM_IWL_RXON_CTX]; + const size_t bufsz = sizeof(buf); + + for_each_context(priv, ctx) { + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", + ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_legacy_is_any_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else { + IWL_ERR(priv, "Sta associated with AP - " + "Change to 40MHz channel support is not allowed\n"); + return -EINVAL; + } + + return count; +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? 
"Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_WRITE_FILE_OPS(log_event); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); + +static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0; + int cnt = 0, entry; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_rx_queue *rxq = &priv->rxq; + char *buf; + int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + + (priv->cfg->base_params->num_of_queues * 32 * 8) + 400; + const u8 *ptr; + ssize_t ret; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate buffer\n"); + return -ENOMEM; + } + pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "q[%d]: read_ptr: %u, write_ptr: %u\n", + cnt, q->read_ptr, q->write_ptr); + } + if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) { + ptr = priv->tx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx Traffic idx: %u\n", priv->tx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "read: %u, write: %u\n", + rxq->read, rxq->write); + + if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) { + ptr = priv->rx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Rx Traffic idx: %u\n", priv->rx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int traffic_log; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &traffic_log) != 1) + return -EFAULT; + if (traffic_log == 0) + iwl_legacy_reset_traffic_log(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + const size_t bufsz = sizeof(char) * 64 * + 
priv->cfg->base_params->num_of_queues; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u stop=%d" + " swq_id=%#.2x (ac %d/hwq %d)\n", + cnt, q->read_ptr, q->write_ptr, + !!test_bit(cnt, priv->queue_stopped), + txq->swq_id, txq->swq_id & 3, + (txq->swq_id >> 2) & 0x1f); + if (cnt >= 4) + continue; + /* for the ACs, display the stop count too */ + pos += scnprintf(buf + pos, bufsz - pos, + " stop-count: %d\n", + atomic_read(&priv->queue_stop_count[cnt])); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_rx_queue *rxq = &priv->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + 
data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + 
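/* one delta gain correction code is kept per Rx chain (A, B, C) */ +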
pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_legacy_send_statistics_request(priv, CMD_SYNC, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[128]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", + priv->event_log.ucode_trace ? 
"On" : "Off"); + pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", + priv->event_log.non_wraps_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", + priv->event_log.wraps_once_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", + priv->event_log.wraps_more_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &trace) != 1) + return -EFAULT; + + if (trace) { + priv->event_log.ucode_trace = true; + /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } else { + priv->event_log.ucode_trace = false; + del_timer_sync(&priv->ucode_trace); + } + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -EFAULT; + + if (priv->cfg->ops->lib->dump_fh) { + ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, + count, ppos, buf, pos); + kfree(buf); + } + } + + return ret; +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { 
+ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%u\n", + priv->cfg->base_params->plcp_delta_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &plcp) != 1) + return -EINVAL; + if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) + priv->cfg->base_params->plcp_delta_threshold = + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; + else + priv->cfg->base_params->plcp_delta_threshold = plcp; + return count; +} + +static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int i, pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_force_reset *force_reset; + + for (i = 0; i < IWL_MAX_FORCE_RESET; i++) { + force_reset = &priv->force_reset[i]; + pos += scnprintf(buf + pos, bufsz - pos, + "Force reset method %d\n", i); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + force_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + force_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + force_reset->reset_reject_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\treset duration: %lu\n", + force_reset->reset_duration); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int reset, ret; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &reset) != 1) + return -EINVAL; + switch (reset) { + case IWL_RF_RESET: + case IWL_FW_RESET: + ret = iwl_legacy_force_reset(priv, reset, true); + break; + default: + return -EINVAL; + } + return ret ? 
ret : count; +} + +static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int timeout; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &timeout) != 1) + return -EINVAL; + if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) + timeout = IWL_DEF_WD_TIMEOUT; + + priv->cfg->base_params->wd_timeout = timeout; + iwl_legacy_setup_watchdog(priv); + return count; +} + +DEBUGFS_READ_FILE_OPS(rx_statistics); +DEBUGFS_READ_FILE_OPS(tx_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); +DEBUGFS_READ_WRITE_FILE_OPS(force_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(wd_timeout); + +/* + * Create the debugfs files and directories + * + */ +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + struct dentry *phyd = priv->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + priv->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + goto err; + dir_debug = debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + if 
(priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + if (priv->cfg->base_params->ucode_tracing) + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, + &priv->disable_sens_cal); + if (priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, + &priv->disable_chain_noise_cal); + DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, + &priv->disable_tx_power_cal); + return 0; + +err: + IWL_ERR(priv, "Can't create the debugfs directory\n"); + iwl_legacy_dbgfs_unregister(priv); + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_register); + +/** + * Remove the debugfs files and directories + * + */ +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ + if (!priv->debugfs_dir) + return; + + debugfs_remove_recursive(priv->debugfs_dir); + priv->debugfs_dir = NULL; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister); diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h new file mode 100644 index 000000000000..25718cf9919a --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -0,0 +1,1426 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-dev.h) for driver implementation definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-4965-hw.h for hardware-related definitions. + */ + +#ifndef __iwl_legacy_dev_h__ +#define __iwl_legacy_dev_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/wait.h> +#include <net/ieee80211_radiotap.h> + +#include "iwl-eeprom.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-debug.h" +#include "iwl-4965-hw.h" +#include "iwl-3945-hw.h" +#include "iwl-led.h" +#include "iwl-power.h" +#include "iwl-legacy-rs.h" + +struct iwl_tx_queue; + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. 
+ * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +#define rxb_addr(r) page_address(r->page) + +/* defined below */ +struct iwl_device_cmd; + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + /* + * only for ASYNC commands + * (which is somewhat stupid -- look at iwl-sta.c for instance + * which duplicates a bunch of code because the callback isn't + * invoked for SYNC commands, if it were and its result passed + * through it would be simpler...) + */ + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. */ + u32 flags; + + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ + /* use for monitoring and recovering the stuck queue */ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +} __packed; + +/* One for each TFD */ +struct iwl_tx_info { + struct sk_buff *skb; + struct iwl_rxon_context *ctx; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @bd: base of circular buffer of TFDs + * @cmd: array of command/TX buffer pointers + * @meta: array of meta data for each command/tx buffer + * @dma_addr_cmd: physical address of cmd/tx buffer array + * @txb: array of per-TFD driver data + * @time_stamp: time (in jiffies) of last read_ptr change + * @need_update: indicates need to update read/write index + * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. 
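+ * @active: set once the queue has been initialized and is in use
+ * @swq_id: software queue id; the tx_queue debugfs read decodes it as
+ * ac = swq_id & 3, hwq = (swq_id >> 2) & 0x1f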
+ */ +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct iwl_tx_queue { + struct iwl_queue q; + void *tfds; + struct iwl_device_cmd **cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_info *txb; + unsigned long time_stamp; + u8 need_update; + u8 sched_retry; + u8 active; + u8 swq_id; +}; + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl4965_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl4965_channel_tgh_info { + s64 last_radar_time; +}; + +#define IWL4965_MAX_RATE (33) + +struct iwl3945_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl3945_channel_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl3945_scan_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +struct iwl_channel_info { + struct iwl4965_channel_tgd_info tgd; + struct iwl4965_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for + * HT40 channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + enum ieee80211_band band; + + /* HT40 channel info */ + s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + u8 ht40_flags; /* flags copied from EEPROM */ + u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). */ + struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE]; + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +#define IWL_TX_FIFO_BK 0 /* shared */ +#define IWL_TX_FIFO_BE 1 +#define IWL_TX_FIFO_VI 2 /* shared */ +#define IWL_TX_FIFO_VO 3 +#define IWL_TX_FIFO_UNUSED -1 + +/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IWL_MIN_NUM_QUEUES 10 + +#define IWL_DEFAULT_CMD_QUEUE_NUM 4 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + CMD_SYNC = 0, + CMD_SIZE_NORMAL = 0, + CMD_NO_SKB = 0, + CMD_SIZE_HUGE = (1 << 0), + CMD_ASYNC = (1 << 1), + CMD_WANT_SKB = (1 << 2), +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/** + * struct iwl_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for a scan command + * (which is relatively huge; space is allocated separately). + */ +struct iwl_device_cmd { + struct iwl_cmd_header hdr; /* uCode API */ + union { + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_tx_cmd tx; + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + } __packed cmd; +} __packed; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) + + +struct iwl_host_cmd { + const void *data; + unsigned long reply_page; + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + u32 flags; + u16 len; + u8 id; +}; + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 read; + u32 write; + u32 free_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + struct iwl_rb_status *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +/** + * struct iwl_ht_agg -- aggregation status while waiting for block-ack + * @txq_id: Tx queue used for Tx attempt + * @frame_count: # frames attempted by Tx command + * @wait_for_ba: Expect block-ack before next Tx reply + * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window + * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window + * @bitmap1: High order, one bit for each frame pending ACK in Tx window + * @rate_n_flags: Rate at which Tx was attempted + * + * If 
REPLY_TX indicates that aggregation was attempted, driver must wait + * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info + * until block ack arrives. + */ +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; +#define IWL_AGG_OFF 0 +#define IWL_AGG_ON 1 +#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 +#define IWL_EMPTYING_HW_QUEUE_DELBA 3 + u8 state; +}; + + +struct iwl_tid_data { + u16 seq_number; /* 4965 only */ + u16 tfds_in_queue; + struct iwl_ht_agg agg; +}; + +struct iwl_hw_key { + u32 cipher; + int keylen; + u8 keyidx; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) +#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) +#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) +#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) +#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K + +/* + * Maximal MPDU density for TX aggregation + * 4 - 2us density + * 5 - 4us density + * 6 - 8us density + * 7 - 16us density + */ +#define CFG_HT_MPDU_DENSITY_2USEC (0x4) +#define CFG_HT_MPDU_DENSITY_4USEC (0x5) +#define CFG_HT_MPDU_DENSITY_8USEC (0x6) +#define CFG_HT_MPDU_DENSITY_16USEC (0x7) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC +#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC +#define CFG_HT_MPDU_DENSITY_MIN (0x1) + +struct iwl_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ +}; + +/* QoS structures */ +struct iwl_qos_info { + int qos_active; + struct iwl_qosparam_cmd def_qos_parm; +}; + +/* + * Structure should be accessed with sta_lock held. When station addition + * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only + * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without + * sta_lock held. + */ +struct iwl_station_entry { + struct iwl_legacy_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; + u8 used, ctxid; + struct iwl_hw_key keyinfo; + struct iwl_link_quality_cmd *lq; +}; + +struct iwl_station_priv_common { + struct iwl_rxon_context *ctx; + u8 sta_id; +}; + +/* + * iwl_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is places in that + * space. + * + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct iwl_station_priv { + struct iwl_station_priv_common common; + struct iwl_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; +}; + +/** + * struct iwl_vif_priv - driver's private per-interface information + * + * When mac80211 allocates a virtual interface, it can allocate + * space for us to put data into. 
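+ * @ctx: RXON context this interface is bound to
+ * @ibss_bssid_sta_id: station table entry used for the IBSS BSSID station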
+ */ +struct iwl_vif_priv { + struct iwl_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode_header { + __le32 ver; /* major/minor/API/serial */ + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; +}; + +struct iwl4965_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + u16 max_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + + +/** + * struct iwl_hw_params + * @max_txq_num: Max # Tx queues supported + * @dma_chnl_num: Number of Tx DMA/FIFO channels + * @scd_bc_tbls_size: size of scheduler byte count tables + * @tfd_size: TFD size + * @tx/rx_chains_num: Number of TX/RX chains + * @valid_tx/rx_ant: usable antennas + * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) + * @max_rxq_log: Log-base-2 of max_rxq_size + * @rx_page_order: Rx buffer page order + * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR + * @max_stations: + * @ht40_channel: is 40MHz width possible in band 2.4 + * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * @sw_crypto: 0 for hw, 1 for sw + * @max_xxx_size: for ucode uses + * @ct_kill_threshold: temperature threshold + * @beacon_time_tsf_bits: number of valid tsf bits for beacon time + * @struct iwl_sensitivity_ranges: range of sensitivity values + */ +struct iwl_hw_params { + u8 max_txq_num; + u8 dma_chnl_num; + u16 scd_bc_tbls_size; + u32 tfd_size; + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 max_rxq_size; + u16 max_rxq_log; + u32 rx_page_order; + u32 rx_wrt_ptr_reg; + u8 max_stations; + u8 ht40_channel; + u8 max_beacon_itrvl; /* in 1024 ms */ + u32 max_inst_size; + u32 max_data_size; + u32 max_bsm_size; + u32 ct_kill_threshold; /* value in hw-dependent units */ + u16 beacon_time_tsf_bits; + const struct iwl_sensitivity_ranges *sens; +}; + + +/****************************************************************************** + * + * Functions implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. 
+ * + * Naming convention -- + * iwl_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl4965_bg_ <-- Called from work queue context + * iwl4965_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl4965_update_chain_flags(struct iwl_priv *priv); +extern const u8 iwl_bcast_addr[ETH_ALEN]; +extern int iwl_legacy_queue_space(const struct iwl_queue *q); +static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? + (i >= q->read_ptr && i < q->write_ptr) : + !(i < q->read_ptr && i >= q->write_ptr); +} + + +static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index, + int is_huge) +{ + /* + * This is for init calibration result and scan command which + * required buffer > TFD_MAX_PAYLOAD_SIZE, + * the big buffer at end of command array + */ + if (is_huge) + return q->n_window; /* must be power of 2 */ + + /* Otherwise, use normal size buffers */ + return index & (q->n_window - 1); +} + + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IWL4965_CAL_NUM_BEACONS 20 +#define IWL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum iwl4965_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl4965_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE, + IWL_CHAIN_NOISE_CALIBRATED, + IWL_CHAIN_NOISE_DONE, +}; + +enum iwl4965_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + +/* + * enum iwl_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum iwl_calib { + IWL_CALIB_MAX, +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + void *buf; + size_t buf_len; +}; + +enum ucode_type { + UCODE_NONE = 0, + UCODE_INIT, + UCODE_RT +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 
num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +#define IWL_TRAFFIC_ENTRIES (256) +#define IWL_TRAFFIC_ENTRY_SIZE (64) + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +/* interrupt statistics */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 rx_handlers[REPLY_MAX]; + u32 tx; + u32 unhandled; +}; + +/* management statistics */ +enum iwl_mgmt_stats { + MANAGEMENT_ASSOC_REQ = 0, + MANAGEMENT_ASSOC_RESP, + MANAGEMENT_REASSOC_REQ, + MANAGEMENT_REASSOC_RESP, + MANAGEMENT_PROBE_REQ, + MANAGEMENT_PROBE_RESP, + MANAGEMENT_BEACON, + MANAGEMENT_ATIM, + MANAGEMENT_DISASSOC, + MANAGEMENT_AUTH, + MANAGEMENT_DEAUTH, + MANAGEMENT_ACTION, + MANAGEMENT_MAX, +}; +/* control statistics */ +enum iwl_ctrl_stats { + CONTROL_BACK_REQ = 0, + CONTROL_BACK, + CONTROL_PSPOLL, + CONTROL_RTS, + CONTROL_CTS, + CONTROL_ACK, + CONTROL_CFEND, + CONTROL_CFENDACK, + CONTROL_MAX, +}; + +struct traffic_stats { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + u32 mgmt[MANAGEMENT_MAX]; + u32 ctrl[CONTROL_MAX]; + u32 data_cnt; + u64 data_bytes; +#endif +}; + +/* + * iwl_switch_rxon: "channel switch" structure + * + * @ switch_in_progress: channel switch in progress + * @ channel: new channel + */ +struct iwl_switch_rxon { + bool switch_in_progress; + __le16 channel; +}; + +/* + * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds + * to perform continuous uCode event logging operation if enabled + */ +#define UCODE_TRACE_PERIOD (100) + +/* + * iwl_event_log: current uCode event log position + * + * @ucode_trace: enable/disable ucode continuous trace timer + * @num_wraps: how many times the event buffer wraps + * @next_entry: the entry just before the next one that uCode would fill + * @non_wraps_count: counter for no wrap detected when dump ucode events + * @wraps_once_count: counter for wrap once detected when dump ucode events + * @wraps_more_count: counter for wrap more than once detected + * when dump ucode events + */ +struct iwl_event_log { + bool ucode_trace; + u32 num_wraps; + u32 next_entry; + int non_wraps_count; + int wraps_once_count; + int wraps_more_count; +}; + +/* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs + */ +#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_TIMEOUT_DEF (0x40) +#define IWL_HOST_INT_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) +#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) + +/* + * This is the threshold value of plcp error rate per 100mSecs. It is + * used to set and check for the validity of plcp_delta. 
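+ * For example, the default IWL_MAX_PLCP_ERR_THRESHOLD_DEF of 50 corresponds
+ * to 50 PLCP errors per 100 ms; the plcp_delta debugfs write falls back to
+ * IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0) for out-of-range input, which
+ * disables the check.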
+ */ +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) +#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) +#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) +#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0) + +#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) +#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + +/* TX queue watchdog timeouts in mSecs */ +#define IWL_DEF_WD_TIMEOUT (2000) +#define IWL_LONG_WD_TIMEOUT (10000) +#define IWL_MAX_WD_TIMEOUT (120000) + +enum iwl_reset { + IWL_RF_RESET = 0, + IWL_FW_RESET, + IWL_MAX_FORCE_RESET, +}; + +struct iwl_force_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long reset_duration; + unsigned long last_force_reset_jiffies; +}; + +/* extend beacon time format bit shifting */ +/* + * for _3945 devices + * bits 31:24 - extended + * bits 23:0 - interval + */ +#define IWL3945_EXT_BEACON_TIME_POS 24 +/* + * for _4965 devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IWL4965_EXT_BEACON_TIME_POS 22 + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS, + + NUM_IWL_RXON_CTX +}; + +struct iwl_rxon_context { + struct ieee80211_vif *vif; + + const u8 *ac_to_fifo; + const u8 *ac_to_queue; + u8 mcast_queue; + + /* + * We could use the vif to indicate active, but we + * also need it to be active during disabling when + * we already removed the vif for type setting. + */ + bool always_active, is_active; + + bool ht_need_multiple_chains; + + enum iwl_rxon_context_id ctxid; + + u32 interface_modes, exclusive_interface_modes; + u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; + + /* + * We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware. + */ + const struct iwl_legacy_rxon_cmd active; + struct iwl_legacy_rxon_cmd staging; + + struct iwl_rxon_time_cmd timing; + + struct iwl_qos_info qos_data; + + u8 bcast_sta_id, ap_sta_id; + + u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + + struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; + u8 key_mapping_keys; + + __le32 station_flags; + + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled, is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + struct iwl_cfg *cfg; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + enum ieee80211_band band; + int alloc_rxb_page; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* track IBSS manager (last beacon) status */ + u32 ibss_manager; + + /* storing the jiffies when the plcp error rate is received */ + unsigned long plcp_jiffies; + + /* force reset */ + struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + + /* we allocate array of iwl_channel_info for NIC's valid channels. 
+ * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* init calibration results */ + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; + + /* Scan related variables */ + unsigned long scan_start; + unsigned long scan_start_tsf; + void *scan_cmd; + enum ieee80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + bool is_internal_short_scan; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + spinlock_t reg_lock; /* protect hw register access */ + struct mutex mutex; + struct mutex sync_cmd_mutex; /* enable serialization of sync commands */ + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + u32 hw_rev; + u32 hw_wa_rev; + u8 rev_id; + + /* microcode/device supports multiple contexts */ + u8 valid_contexts; + + /* command queue number */ + u8 cmd_queue; + + /* max number of station keys */ + u8 sta_key_max_num; + + /* EEPROM MAC addresses */ + struct mac_address addresses[1]; + + /* uCode images, save to reload in case of failure */ + int fw_index; /* firmware we're trying to load */ + u32 ucode_ver; /* version of ucode, copy of + iwl_ucode.ver */ + struct fw_desc ucode_code; /* runtime inst */ + struct fw_desc ucode_data; /* runtime data original */ + struct fw_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_desc ucode_init; /* initialization inst */ + struct fw_desc ucode_init_data; /* initialization data */ + struct fw_desc ucode_boot; /* bootstrap inst */ + enum ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ + char firmware_name[25]; + + struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; + + struct iwl_switch_rxon switch_rxon; + + /* 1st responses from initialize and runtime uCode images. + * _4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + + u16 active_rate; + + u8 start_calib; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; + + struct iwl_ht_config current_ht_config; + + /* Rate scaling data */ + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue *txq; + unsigned long txq_ctx_active_msk; + struct iwl_dma_ptr kw; /* keep warm address */ + struct iwl_dma_ptr scd_bc_tbls; + + u32 scd_base_addr; /* scheduler sram base address */ + + unsigned long status; + + /* counts mgmt, ctl, and data packets */ + struct traffic_stats tx_stats; + struct traffic_stats rx_stats; + + /* counts interrupts */ + struct isr_statistics isr_stats; + + struct iwl_power_mgr power_data; + + /* context information */ + u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ + + /* station table variables */ + + /* Note: if lock and sta_lock are needed, lock must be acquired first */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + unsigned long ucode_key_table; + + /* queue refcounts */ +#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + u8 mac80211_registered; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + struct iwl_eeprom_calib_info *calib_info; + + enum nl80211_iftype iw_mode; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + + union { +#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) + struct { + void *shared_virt; + dma_addr_t shared_phys; + + struct delayed_work thermal_periodic; + struct delayed_work rfkill_poll; + + struct iwl3945_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl3945_notif_statistics accum_statistics; + struct iwl3945_notif_statistics delta_statistics; + struct iwl3945_notif_statistics max_delta; +#endif + + u32 sta_supp_rates; + int last_rx_rssi; /* From Rx packet statistics */ + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* + * each calibration channel group in the + * EEPROM has a derived clip setting for + * each rate. + */ + const struct iwl3945_clip_group clip_groups[5]; + + } _3945; +#endif +#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) + struct { + /* + * reporting the number of tids has AGG on. 
0 means + * no AGGREGATION + */ + u8 agg_tids_count; + + struct iwl_rx_phy_res last_phy_res; + bool last_phy_res_valid; + + struct completion firmware_loading_complete; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + struct iwl_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl_notif_statistics accum_statistics; + struct iwl_notif_statistics delta_statistics; + struct iwl_notif_statistics max_delta; +#endif + + } _4965; +#endif + }; + + struct iwl_hw_params hw_params; + + u32 inta_mask; + + struct workqueue_struct *workqueue; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct abort_scan; + + struct iwl_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + + struct work_struct start_internal_scan; + struct work_struct tx_flush; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work scan_check; + + /* TX Power */ + s8 tx_power_user_lmt; + s8 tx_power_device_lmt; + s8 tx_power_next; + + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + /* debugging info */ + u32 debug_level; /* per device debugging will override global + iwl_debug_level if set */ +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + /* debugfs */ + u16 tx_traffic_idx; + u16 rx_traffic_idx; + u8 *tx_traffic; + u8 *rx_traffic; + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + + struct work_struct txpower_work; + u32 disable_sens_cal; + u32 disable_chain_noise_cal; + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; + struct timer_list watchdog; + bool hw_ready; + + struct iwl_event_log event_log; + + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; +}; /*iwl_priv */ + +static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +/* + * iwl_legacy_get_debug_level: Return active debug level for device + * + * Using sysfs it is possible to set per device debug level. This debug + * level will be used if set, otherwise the global debug level which can be + * set via module parameter is used. + */ +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + if (priv->debug_level) + return priv->debug_level; + else + return iwl_debug_level; +} +#else +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + return iwl_debug_level; +} +#endif + + +static inline struct ieee80211_hdr * +iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv, + int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb) + return (struct ieee80211_hdr *)priv->txq[txq_id]. 
+        txb[idx].skb->data;
+        return NULL;
+}
+
+static inline struct iwl_rxon_context *
+iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+        return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx)                                \
+        for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];              \
+             ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)       \
+                if (priv->valid_contexts & BIT(ctx->ctxid))
+
+static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
+                                           enum iwl_rxon_context_id ctxid)
+{
+        return (priv->contexts[ctxid].active.filter_flags &
+                RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
+{
+        return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
+}
+
+static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+        return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
+{
+        if (ch_info == NULL)
+                return 0;
+        return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
+{
+        return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
+{
+        return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
+{
+        return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline void
+__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
+{
+        __free_pages(page, priv->hw_params.rx_page_order);
+        priv->alloc_rxb_page--;
+}
+
+static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+        free_pages(page, priv->hw_params.rx_page_order);
+        priv->alloc_rxb_page--;
+}
+#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
new file mode 100644
index 000000000000..080b852b33bd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#include "iwl-dev.h" + +#define CREATE_TRACE_POINTS +#include "iwl-devtrace.h" + +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event); +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h new file mode 100644 index 000000000000..9612aa0f6ec4 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h @@ -0,0 +1,270 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_LEGACY_DEVICE_TRACE + +#include <linux/tracepoint.h> + +#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif + + +#define PRIV_ENTRY __field(struct iwl_priv *, priv) +#define PRIV_ASSIGN (__entry->priv = priv) + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_io + +TRACE_EVENT(iwlwifi_legacy_dev_ioread32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] read io[%#x] = %#x", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite8, + TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u8, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_ucode + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event, + TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry), + TP_ARGS(priv, wraps, n_entry, p_entry), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, wraps) + __field(u32, n_entry) + __field(u32, p_entry) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->wraps = wraps; + __entry->n_entry = n_entry; + __entry->p_entry = p_entry; + ), + TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X", + __entry->priv, __entry->wraps, __entry->n_entry, + __entry->p_entry) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi + +TRACE_EVENT(iwlwifi_legacy_dev_hcmd, + TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), + TP_ARGS(priv, hcmd, len, flags), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, hcmd, len) + __field(u32, flags) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(hcmd), hcmd, len); + __entry->flags = flags; + ), + TP_printk("[%p] hcmd %#.2x (%ssync)", + __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->flags & CMD_ASYNC ? 
"a" : "") +); + +TRACE_EVENT(iwlwifi_legacy_dev_rx, + TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), + TP_ARGS(priv, rxbuf, len), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, rxbuf, len) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(rxbuf), rxbuf, len); + ), + TP_printk("[%p] RX cmd %#.2x", + __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4]) +); + +TRACE_EVENT(iwlwifi_legacy_dev_tx, + TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, + void *buf0, size_t buf0_len, + void *buf1, size_t buf1_len), + TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(size_t, framelen) + __dynamic_array(u8, tfd, tfdlen) + + /* + * Do not insert between or below these items, + * we want to keep the frame together (except + * for the possible padding). + */ + __dynamic_array(u8, buf0, buf0_len) + __dynamic_array(u8, buf1, buf1_len) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->framelen = buf0_len + buf1_len; + memcpy(__get_dynamic_array(tfd), tfd, tfdlen); + memcpy(__get_dynamic_array(buf0), buf0, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); + ), + TP_printk("[%p] TX %.2x (%zu bytes)", + __entry->priv, + ((u8 *)__get_dynamic_array(buf0))[0], + __entry->framelen) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_error, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, + u32 data1, u32 data2, u32 line, u32 blink1, + u32 blink2, u32 ilink1, u32 ilink2), + TP_ARGS(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, desc) + __field(u32, time) + __field(u32, data1) + __field(u32, data2) + __field(u32, line) + __field(u32, blink1) + __field(u32, blink2) + __field(u32, ilink1) + __field(u32, ilink2) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->desc = desc; + __entry->time = time; + __entry->data1 = data1; + __entry->data2 = data2; + __entry->line = line; + __entry->blink1 = blink1; + __entry->blink2 = blink2; + __entry->ilink1 = ilink1; + __entry->ilink2 = ilink2; + ), + TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", + __entry->priv, __entry->desc, __entry->time, __entry->data1, + __entry->data2, __entry->line, __entry->blink1, + __entry->blink2, __entry->ilink1, __entry->ilink2) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); +#endif /* __IWLWIFI_DEVICE_TRACE */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c new file mode 100644 index 000000000000..39e577323942 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c @@ -0,0 +1,561 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-io.h" + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. 
+ * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. + * + *********************************************************************/ + +/* 2.4 GHz */ +const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv) +{ + u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + int ret = 0; + + IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); + switch (gp) { + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + break; + default: + IWL_ERR(priv, "bad EEPROM signature," + "EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + break; + } + return ret; +} + +const u8 +*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +{ + BUG_ON(offset >= priv->cfg->base_params->eeprom_size); + return &priv->eeprom[offset]; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr); + +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset) +{ + if (!priv->eeprom) + return 0; + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query16); + +/** + * iwl_legacy_eeprom_init - read EEPROM contents + * + * Load the EEPROM contents from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */
+int iwl_legacy_eeprom_init(struct iwl_priv *priv)
+{
+        __le16 *e;
+        u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
+        int sz;
+        int ret;
+        u16 addr;
+
+        /* allocate eeprom */
+        sz = priv->cfg->base_params->eeprom_size;
+        IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+        priv->eeprom = kzalloc(sz, GFP_KERNEL);
+        if (!priv->eeprom) {
+                ret = -ENOMEM;
+                goto alloc_err;
+        }
+        e = (__le16 *)priv->eeprom;
+
+        priv->cfg->ops->lib->apm_ops.init(priv);
+
+        ret = iwl_legacy_eeprom_verify_signature(priv);
+        if (ret < 0) {
+                IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+                ret = -ENOENT;
+                goto err;
+        }
+
+        /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+        ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
+        if (ret < 0) {
+                IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
+                ret = -ENOENT;
+                goto err;
+        }
+
+        /* eeprom is an array of 16bit values */
+        for (addr = 0; addr < sz; addr += sizeof(u16)) {
+                u32 r;
+
+                _iwl_legacy_write32(priv, CSR_EEPROM_REG,
+                            CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+                ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
+                                   CSR_EEPROM_REG_READ_VALID_MSK,
+                                   CSR_EEPROM_REG_READ_VALID_MSK,
+                                   IWL_EEPROM_ACCESS_TIMEOUT);
+                if (ret < 0) {
+                        IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
+                                addr);
+                        goto done;
+                }
+                r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
+                e[addr / 2] = cpu_to_le16(r >> 16);
+        }
+
+        IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+                         "EEPROM",
+                         iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
+
+        ret = 0;
+done:
+        priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
+
+err:
+        if (ret)
+                iwl_legacy_eeprom_free(priv);
+        /* Reset chip to save power until we load uCode during "up". */
+        iwl_legacy_apm_stop(priv);
+alloc_err:
+        return ret;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_init);
+
+void iwl_legacy_eeprom_free(struct iwl_priv *priv)
+{
+        kfree(priv->eeprom);
+        priv->eeprom = NULL;
+}
+EXPORT_SYMBOL(iwl_legacy_eeprom_free);
+
+static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
+                        int eep_band, int *eeprom_ch_count,
+                        const struct iwl_eeprom_channel **eeprom_ch_info,
+                        const u8 **eeprom_ch_index)
+{
+        u32 offset = priv->cfg->ops->lib->
+                        eeprom_ops.regulatory_bands[eep_band - 1];
+        switch (eep_band) {
+        case 1:         /* 2.4GHz band */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_1;
+                break;
+        case 2:         /* 4.9GHz band */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_2;
+                break;
+        case 3:         /* 5.2GHz band */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_3;
+                break;
+        case 4:         /* 5.5GHz band */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_4;
+                break;
+        case 5:         /* 5.7GHz band */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_5;
+                break;
+        case 6:         /* 2.4GHz ht40 channels */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_6;
+                break;
+        case 7:         /* 5 GHz ht40 channels */
+                *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
+                *eeprom_ch_info = (struct iwl_eeprom_channel *)
+                                iwl_legacy_eeprom_query_addr(priv, offset);
+                *eeprom_ch_index = iwl_eeprom_band_7;
+                break;
+        default:
+                BUG();
+                return;
+        }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+                            ? # x " " : "")
+/**
+ * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
+ *
+ * Does not set up a command or touch hardware.
+ */
+static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
+                              enum ieee80211_band band, u16 channel,
+                              const struct iwl_eeprom_channel *eeprom_ch,
+                              u8 clear_ht40_extension_channel)
+{
+        struct iwl_channel_info *ch_info;
+
+        ch_info = (struct iwl_channel_info *)
+                        iwl_legacy_get_channel_info(priv, band, channel);
+
+        if (!iwl_legacy_is_channel_valid(ch_info))
+                return -1;
+
+        IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+                        " Ad-Hoc %ssupported\n",
+                        ch_info->channel,
+                        iwl_legacy_is_channel_a_band(ch_info) ?
+                        "5.2" : "2.4",
+                        CHECK_AND_PRINT(IBSS),
+                        CHECK_AND_PRINT(ACTIVE),
+                        CHECK_AND_PRINT(RADAR),
+                        CHECK_AND_PRINT(WIDE),
+                        CHECK_AND_PRINT(DFS),
+                        eeprom_ch->flags,
+                        eeprom_ch->max_power_avg,
+                        ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
+                         && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
+                        "" : "not ");
+
+        ch_info->ht40_eeprom = *eeprom_ch;
+        ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+        ch_info->ht40_flags = eeprom_ch->flags;
+        if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+                ch_info->ht40_extension_channel &=
+                                ~clear_ht40_extension_channel;
+
+        return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+                            ? # x " " : "")
+
+/**
+ * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
+ */
+int iwl_legacy_init_channel_map(struct iwl_priv *priv)
+{
+        int eeprom_ch_count = 0;
+        const u8 *eeprom_ch_index = NULL;
+        const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
+        int band, ch;
+        struct iwl_channel_info *ch_info;
+
+        if (priv->channel_count) {
+                IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
+                return 0;
+        }
+
+        IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
+
+        priv->channel_count =
+            ARRAY_SIZE(iwl_eeprom_band_1) +
+            ARRAY_SIZE(iwl_eeprom_band_2) +
+            ARRAY_SIZE(iwl_eeprom_band_3) +
+            ARRAY_SIZE(iwl_eeprom_band_4) +
+            ARRAY_SIZE(iwl_eeprom_band_5);
+
+        IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
+                        priv->channel_count);
+
+        priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
+                        priv->channel_count, GFP_KERNEL);
+        if (!priv->channel_info) {
+                IWL_ERR(priv, "Could not allocate channel_info\n");
+                priv->channel_count = 0;
+                return -ENOMEM;
+        }
+
+        ch_info = priv->channel_info;
+
+        /* Loop through the 5 EEPROM bands, adding them in order to the
+         * channel map we maintain (which contains more information than
+         * just what is in the EEPROM) */
+        for (band = 1; band <= 5; band++) {
+
+                iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
+                                        &eeprom_ch_info, &eeprom_ch_index);
+
+                /* Loop through each band adding each of the channels */
+                for (ch = 0; ch < eeprom_ch_count; ch++) {
+                        ch_info->channel = eeprom_ch_index[ch];
+                        ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
+                                        IEEE80211_BAND_5GHZ;
+
+                        /* permanently store EEPROM's channel regulatory flags
+                         * and max power in channel info database.
*/ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + /* First write that ht40 is not enabled, and then enable + * one by one */ + ch_info->ht40_extension_channel = + IEEE80211_CHAN_NO_HT40; + + if (!(iwl_legacy_is_channel_valid(ch_info))) { + IWL_DEBUG_EEPROM(priv, + "Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] " + "%s%s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the tx_power_user_lmt to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->tx_power_user_lmt) + priv->tx_power_user_lmt = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + /* Check if we do have HT40 channels */ + if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return 0; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = + (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + /* Set up driver's info for lower half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch], + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch] + 4, + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_channel_map); + +/* + * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map + */ +void iwl_legacy_free_channel_map(struct iwl_priv *priv) +{ + kfree(priv->channel_info); + priv->channel_count = 0; +} +EXPORT_SYMBOL(iwl_legacy_free_channel_map); + +/** + * iwl_legacy_get_channel_info - Find driver's private channel info + * + * Based on band and channel number. 
+ */ +const struct +iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel) +{ + int i; + + switch (band) { + case IEEE80211_BAND_5GHZ: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + case IEEE80211_BAND_2GHZ: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + default: + BUG(); + } + + return NULL; +} +EXPORT_SYMBOL(iwl_legacy_get_channel_info); diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h new file mode 100644 index 000000000000..0744f8da63b4 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h @@ -0,0 +1,344 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_eeprom_h__ +#define __iwl_legacy_eeprom_h__ + +#include <net/mac80211.h> + +struct iwl_priv; + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +/* + * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags. + * + * IBSS and/or AP operation is allowed *only* on those channels with + * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because + * RADAR detection is not supported by the 4965 driver, but is a + * requirement for establishing a new network for legal operation on channels + * requiring RADAR detection or restricting ACTIVE scanning. + * + * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. + * It only indicates that 20 MHz channel use is supported; HT40 channel + * usage is indicated by a separate set of regulatory flags for each + * HT40 channel pair. + * + * NOTE: Using a channel inappropriately will result in a uCode error! + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ + /* Bit 6 Reserved (was Narrow Channel) */ + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* SKU Capabilities */ +/* 3945 only */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) + +/* *regulatory* channel data format in eeprom, one for each channel. + * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. 
*/ +struct iwl_eeprom_channel { + u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __packed; + +/* 3945 Specific */ +#define EEPROM_3945_EEPROM_VERSION (0x2f) + +/* 4965 has two radio transmitters (and 3 radio receivers) */ +#define EEPROM_TX_POWER_TX_CHAINS (2) + +/* 4965 has room for up to 8 sets of txpower calibration data */ +#define EEPROM_TX_POWER_BANDS (8) + +/* 4965 factory calibration measures txpower gain settings for + * each of 3 target output levels */ +#define EEPROM_TX_POWER_MEASUREMENTS (3) + +/* 4965 Specific */ +/* 4965 driver does not work with txpower calibration version < 5 */ +#define EEPROM_4965_TX_POWER_VERSION (5) +#define EEPROM_4965_EEPROM_VERSION (0x2f) +#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ +#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + +/* 2.4 GHz */ +extern const u8 iwl_eeprom_band_1[14]; + +/* + * factory calibration data for one txpower level, on one channel, + * measured on one of the 2 tx chains (radio transmitter and associated + * antenna). EEPROM contains: + * + * 1) Temperature (degrees Celsius) of device when measurement was made. + * + * 2) Gain table index used to achieve the target measurement power. + * This refers to the "well-known" gain tables (see iwl-4965-hw.h). + * + * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). + * + * 4) RF power amplifier detector level measurement (not used). + */ +struct iwl_eeprom_calib_measure { + u8 temperature; /* Device temperature (Celsius) */ + u8 gain_idx; /* Index into gain table */ + u8 actual_pow; /* Measured RF output power, half-dBm */ + s8 pa_det; /* Power amp detector level (not used) */ +} __packed; + + +/* + * measurement set for one channel. EEPROM contains: + * + * 1) Channel number measured + * + * 2) Measurements for each of 3 power levels for each of 2 radio transmitters + * (a.k.a. "tx chains") (6 measurements altogether) + */ +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure + measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __packed; + +/* + * txpower subband info. + * + * For each frequency subband, EEPROM contains the following: + * + * 1) First and last channels within range of the subband. "0" values + * indicate that this sample set is not being used. + * + * 2) Sample measurement sets for 2 channels close to the range endpoints. + */ +struct iwl_eeprom_calib_subband_info { + u8 ch_from; /* channel number of lowest channel in subband */ + u8 ch_to; /* channel number of highest channel in subband */ + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __packed; + + +/* + * txpower calibration info. EEPROM contains: + * + * 1) Factory-measured saturation power levels (maximum levels at which + * tx power amplifier can output a signal without too much distortion). + * There is one level for 2.4 GHz band and one for 5 GHz band. These + * values apply to all channels within each of the bands. + * + * 2) Factory-measured power supply voltage level. This is assumed to be + * constant (i.e. same value applies to all channels/bands) while the + * factory measurements are being made. + * + * 3) Up to 8 sets of factory-measured txpower calibration values. 
+ * These are for different frequency ranges, since txpower gain + * characteristics of the analog radio circuitry vary with frequency. + * + * Not all sets need to be filled with data; + * struct iwl_eeprom_calib_subband_info contains range of channels + * (0 if unused) for each set of data. + */ +struct iwl_eeprom_calib_info { + u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ + u8 saturation_power52; /* half-dBm */ + __le16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info + band_info[EEPROM_TX_POWER_BANDS]; +} __packed; + + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ + +/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 +#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by iwl has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + +/* + * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) + * + * The channel listed is the center of the lower 20 MHz half of the channel. 
+ * The overall center frequency is actually 2 channels (10 MHz) above that, + * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away + * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, + * and the overall HT40 channel width centers on channel 3. + * + * NOTE: The RXON command uses 20 MHz channel numbers to specify the + * control channel to which to tune. RXON also specifies whether the + * control channel is the upper or lower half of a HT40 channel. + * + * NOTE: 4965 does not support HT40 channels on 2.4 GHz. + */ +#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ + +/* + * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), + * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) + */ +#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ + +#define EEPROM_REGULATORY_BAND_NO_HT40 (0) + +struct iwl_eeprom_ops { + const u32 regulatory_bands[7]; + int (*acquire_semaphore) (struct iwl_priv *priv); + void (*release_semaphore) (struct iwl_priv *priv); +}; + + +int iwl_legacy_eeprom_init(struct iwl_priv *priv); +void iwl_legacy_eeprom_free(struct iwl_priv *priv); +const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, + size_t offset); +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset); +int iwl_legacy_init_channel_map(struct iwl_priv *priv); +void iwl_legacy_free_channel_map(struct iwl_priv *priv); +const struct iwl_channel_info *iwl_legacy_get_channel_info( + const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel); + +#endif /* __iwl_legacy_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h new file mode 100644 index 000000000000..4e20c7e5c883 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-fh.h @@ -0,0 +1,513 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_legacy_fh_h__ +#define __iwl_legacy_fh_h__ + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. + * + * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the 4965 + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. + * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). 
+ * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) + +/* Find TFD CB base pointer for given queue (range 0-15). */ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and 4965 for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into 4965 registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the index of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. + * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver; see struct iwl4965_shared, val0): + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (4965 writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the 4965's "write" index register, + * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" index corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" index has advanced past 1! See below). + * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the 4965 fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the index of the latest filled RBD. The driver must + * read this "read" index from DRAM after receiving an Rx interrupt from 4965. + * + * The driver must also internally keep track of a third index, which is the + * next RBD to process. 
When receiving an Rx interrupt, driver should process + * all filled but unprocessed RBs up to, but not including, the RB + * corresponding to the "read" index. For example, if "read" index becomes "1", + * driver may process the RB pointed to by RBD 0. Depending on volume of + * traffic, there may be many RBs to process. + * + * If read index == write index, 4965 thinks there is no room to put new data. + * Due to this, the maximum number of filled RBs is 255, instead of 256. To + * be safe, make sure that there is a gap of at least 2 RBDs between "write" + * and "read" indexes; that is, make sure that there are no more than 254 + * buffers waiting to be filled. + */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +/** + * Physical base address of 8-byte Rx Status buffer. + * Bit fields: + * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. + */ +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) + +/** + * Physical base address of Rx Buffer Descriptor Circular Buffer. + * Bit fields: + * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. + */ +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) + +/** + * Rx write pointer (index, really!). + * Bit fields: + * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. + * NOTE: For 256-entry circular buffer, use only bits [7:0]. + */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) + + +/** + * Rx Config/Status Registers (RCSR) + * Rx Config Reg for channel 0 (only channel used) + * + * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for + * normal operation (see bit fields). + * + * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. + * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for + * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. + * + * Bit fields: + * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29-24: reserved + * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), + * min "5" for 32 RBDs, max "12" for 4096 RBDs. + * 19-18: reserved + * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, + * '10' 12K, '11' 16K. 
+ * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x10) + +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. + */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) + +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 + +/* TFDB Area - TFDs buffer table */ +#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) +#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) +#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. 
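+ *
+ * For example (an illustrative sketch, not taken from this patch), that
+ * initialization amounts to:
+ *
+ *	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);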
+ * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH49_TCSR_CHNL_NUM (7) +#define FH50_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle + * (channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) +#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) + +#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. 
This means that the credit mechanism was not + * synchronized to the TxFIFO status + * uCode/driver must write "1" in order to clear this flag + */ +#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) + +#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) + +/* Tx service channels */ +#define FH_SRVC_CHNL (9) +#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) +#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ + (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) + +#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) +/* Instruct FH to increment the retry count of a packet when + * it is brought from the memory to TX-FIFO + */ +#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +/* Size of one Rx buffer in host DRAM */ +#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */ +#define IWL_RX_BUF_SIZE_4K (4 * 1024) +#define IWL_RX_BUF_SIZE_8K (8 * 1024) + +/** + * struct iwl_rb_status - receive buffer status + * host memory mapped FH registers + * @closed_rb_num [0:11] - Indicates the index of the RB which was closed + * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed + * @finished_rb_num [0:11] - Indicates the index of the current RB + * in which the last frame was written to + * @finished_fr_num [0:11] - Indicates the index of the RX Frame + * which was transferred + */ +struct iwl_rb_status { + __le16 closed_rb_num; + __le16 closed_fr_num; + __le16 finished_rb_num; + __le16 finished_fr_nam; + __le32 __unused; /* 3945 only */ +} __packed; + + +#define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_BC_DUP (64) +#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) +#define IWL_NUM_OF_TBS 20 + +static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr) +{ + return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF; +} +/** + * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * + * This structure contains the DMA address and length of a Tx buffer + * + * @lo: low [31:0] portion of the dma address of TX buffer + * every even entry is unaligned on a 16-bit boundary + * @hi_n_len 0-3 [35:32] portion of dma + * 4-15 length of the tx buffer + */ +struct iwl_tfd_tb { + __le32 lo; + __le16 hi_n_len; +} __packed; + +/** + * struct iwl_tfd + * + * Transmit Frame Descriptor (TFD) + * + * @ __reserved1[3] reserved + * @ num_tbs 0-4 number of active tbs + * 5 reserved + * 6-7 padding (not used) + * @ tbs[20] transmit frame buffer descriptors + * @ __pad padding + * + * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. + * Both driver and device share these circular buffers, each of which must be + * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes + * + * Driver must indicate the physical address of the base of each + * circular buffer via the FH_MEM_CBBC_QUEUE registers. + * + * Each TFD contains pointer/size information for up to 20 data buffers + * in host DRAM. These buffers collectively contain the (one) frame described + * by the TFD. Each buffer must be a single contiguous block of memory within + * itself, but buffers may be scattered in host DRAM. Each buffer has max size + * of (4K - 4). 
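+ *
+ * As an illustrative sketch (not code from this patch), filling entry 'idx'
+ * of a TFD with a DMA-mapped buffer at 'addr' of length 'len' comes down to:
+ *
+ *	tfd->tbs[idx].lo = cpu_to_le32(addr & 0xffffffff);
+ *	tfd->tbs[idx].hi_n_len = cpu_to_le16(iwl_legacy_get_dma_hi_addr(addr) |
+ *					     (len << 4));
+ *	tfd->num_tbs = idx + 1;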
The concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; + __le32 __pad; +} __packed; + +/* Keep Warm Size */ +#define IWL_KW_SIZE 0x1000 /* 4k */ + +#endif /* !__iwl_legacy_fh_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c new file mode 100644 index 000000000000..9d721cbda5bb --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c @@ -0,0 +1,271 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" + + +const char *iwl_legacy_get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_WEPKEY); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} +EXPORT_SYMBOL(iwl_legacy_get_cmd_string); + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +static void 
iwl_legacy_generic_cmd_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + } +#endif +} + +static int +iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->flags & CMD_WANT_SKB); + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = iwl_legacy_generic_cmd_callback; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_legacy_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + BUG_ON(cmd->flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. */ + BUG_ON(cmd->callback); + + IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + mutex_lock(&priv->sync_cmd_mutex); + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + + cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERR(priv, + "Error sending %s: time out after %dms.\n", + iwl_legacy_get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, + "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IWL_ERR(priv, "Error: Response NULL in '%s'\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + ret = 0; + goto out; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= + ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + iwl_legacy_free_pages(priv, cmd->reply_page); + cmd->reply_page = 0; + } +out: + mutex_unlock(&priv->sync_cmd_mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_sync); + +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return iwl_legacy_send_cmd_async(priv, cmd); + + return iwl_legacy_send_cmd_sync(priv, cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd); + +int +iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_legacy_send_cmd_sync(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu); + +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, + u8 id, u16 len, const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + cmd.flags |= CMD_ASYNC; + cmd.callback = callback; + + return iwl_legacy_send_cmd_async(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async); diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h new file mode 100644 index 000000000000..02132e755831 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h @@ -0,0 +1,181 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_helpers_h__ +#define __iwl_legacy_helpers_h__ + +#include <linux/ctype.h> +#include <net/mac80211.h> + +#include "iwl-io.h" + +#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + + +static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +/** + * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/** + * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +/* TODO: Move fw_desc functions to iwl-pci.ko */ +static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(&pci_dev->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (!desc->len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, + &desc->p_addr, GFP_KERNEL); + return (desc->v_addr != NULL) ? 0 : -ENOMEM; +} + +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW queue ID + * | + * +---------------------- unused + */ +static inline void +iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only use 5 bits */ + + txq->swq_id = (hwq << 2) | ac; +} + +static inline void iwl_legacy_wake_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (test_and_clear_bit(hwq, priv->queue_stopped)) + if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(priv->hw, ac); +} + +static inline void iwl_legacy_stop_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (!test_and_set_bit(hwq, priv->queue_stopped)) + if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(priv->hw, ac); +} + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + +static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); +} + +static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, 
priv->inta_mask); +} + +/** + * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +#endif /* __iwl_legacy_helpers_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h new file mode 100644 index 000000000000..5cc5d342914f --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-io.h @@ -0,0 +1,545 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_io_h__ +#define __iwl_legacy_io_h__ + +#include <linux/io.h> + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-devtrace.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * and the current line number and caller function name are printed in addition + * to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's name and __LINE__ to the double + * prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_legacy_read_direct32 calls the non-check version of + * _iwl_legacy_read32.) + * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguration of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. 
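+ *
+ * For example (illustrative), with CONFIG_IWLWIFI_LEGACY_DEBUG enabled,
+ *
+ *	iwl_write32(priv, CSR_INT_MASK, 0);
+ *
+ * expands to __iwl_legacy_write32(__FILE__, __LINE__, priv, CSR_INT_MASK, 0),
+ * which logs the offset, value and caller before calling _iwl_legacy_write32();
+ * without the debug option it maps directly to _iwl_legacy_write32().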
+ * + */ + +static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val) +{ + trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val); + iowrite8(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u8 val) +{ + IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write8(priv, ofs, val); +} +#define iwl_write8(priv, ofs, val) \ + __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val) +#endif + + +static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val) +{ + trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val); + iowrite32(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write32(priv, ofs, val); +} +#define iwl_write32(priv, ofs, val) \ + __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val) +#endif + +static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs) +{ + u32 val = ioread32(priv->hw_base + ofs); + trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val); + return val; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 +__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) +{ + IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_legacy_read32(priv, ofs); +} +#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs) +#else +#define iwl_read32(p, o) _iwl_legacy_read32(p, o) +#endif + +#define IWL_POLL_INTERVAL 10 /* microseconds */ +static inline int +_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout); + IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", + addr, bits, mask, + unlikely(ret == -ETIMEDOUT) ? 
"timeout" : "", f, l); + return ret; +} +#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ + __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \ + bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) | mask; + IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, + mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_set_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline void +_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) & ~mask; + IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_clear_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv) +{ + int ret; + u32 val; + + /* this bit wakes up the NIC */ + _iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 
+ * + */ + ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (ret < 0) { + val = _iwl_legacy_read32(priv, CSR_GP_CNTRL); + IWL_ERR(priv, + "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); + _iwl_legacy_write32(priv, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + return -EIO; + } + + return 0; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l); + return _iwl_legacy_grab_nic_access(priv); +} +#define iwl_grab_nic_access(priv) \ + __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_nic_access(priv) \ + _iwl_legacy_grab_nic_access(priv) +#endif + +static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv) +{ + _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_release_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + + IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l); + _iwl_legacy_release_nic_access(priv); +} +#define iwl_release_nic_access(priv) \ + __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_nic_access(priv) \ + _iwl_legacy_release_nic_access(priv) +#endif + +static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + return _iwl_legacy_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_legacy_read_direct32(priv, reg); + IWL_DEBUG_IO(priv, + "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value, + f, l); + return value; +} +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +#else +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = _iwl_legacy_read_direct32(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; + +} +#endif + +static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_legacy_write32(priv, reg, value); +} +static inline void +iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, reg, value); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + iwl_legacy_write_direct32(priv, reg, *values); + } +} + +static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr, + u32 mask, int timeout) +{ + int t = 0; + + do { + if 
((iwl_legacy_read_direct32(priv, addr) & mask) == mask) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout); + + if (unlikely(ret == -ETIMEDOUT)) + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, ret, f, l); + return ret; +} +#define iwl_poll_direct_bit(priv, addr, mask, timeout) \ +__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout) +#else +#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit +#endif + +static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + rmb(); + return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT); +} +static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return val; +} + +static inline void _iwl_legacy_write_prph(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); +} + +static inline void +iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_prph(priv, addr, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_prph(priv, reg, mask) \ +_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask)) + +static inline void +iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_prph(priv, reg, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \ +_iwl_legacy_write_prph(priv, reg, \ + ((_iwl_legacy_read_prph(priv, reg) & mask) | bits)) + +static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, + u32 bits, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_clear_bits_prph(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + _iwl_legacy_write_prph(priv, reg, (val & ~mask)); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr) +{ + unsigned long reg_flags; + u32 
value; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); + rmb(); + value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +static inline void +iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void +iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + for (; 0 < len; len -= sizeof(u32), values++) + _iwl_legacy_write_direct32(priv, + HBUS_TARG_MEM_WDAT, *values); + + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c new file mode 100644 index 000000000000..15eb8b707157 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-led.c @@ -0,0 +1,188 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* default: IWL_LED_BLINK(0) using blinking index table */ +static int led_mode; +module_param(led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking"); + +static const struct ieee80211_tpt_blink iwl_blink[] = { + { .throughput = 0 * 1024 - 1, .blink_time = 334 }, + { .throughput = 1 * 1024 - 1, .blink_time = 260 }, + { .throughput = 5 * 1024 - 1, .blink_time = 220 }, + { .throughput = 10 * 1024 - 1, .blink_time = 190 }, + { .throughput = 20 * 1024 - 1, .blink_time = 170 }, + { .throughput = 50 * 1024 - 1, .blink_time = 150 }, + { .throughput = 70 * 1024 - 1, .blink_time = 130 }, + { .throughput = 100 * 1024 - 1, .blink_time = 110 }, + { .throughput = 200 * 1024 - 1, .blink_time = 80 }, + { .throughput = 300 * 1024 - 1, .blink_time = 50 }, +}; + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 0% on 3945, + * 5% on 4965 HW. + * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv, + u8 time, u16 compensation) +{ + if (!compensation) { + IWL_ERR(priv, "undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8)((time * compensation) >> 6); +} + +/* Set led pattern command */ +static int iwl_legacy_led_cmd(struct iwl_priv *priv, + unsigned long on, + unsigned long off) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .interval = IWL_DEF_LED_INTRVL + }; + int ret; + + if (!test_bit(STATUS_READY, &priv->status)) + return -EBUSY; + + if (priv->blink_on == on && priv->blink_off == off) + return 0; + + IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", + priv->cfg->base_params->led_compensation); + led_cmd.on = iwl_legacy_blink_compensation(priv, on, + priv->cfg->base_params->led_compensation); + led_cmd.off = iwl_legacy_blink_compensation(priv, off, + priv->cfg->base_params->led_compensation); + + ret = priv->cfg->ops->led->cmd(priv, &led_cmd); + if (!ret) { + priv->blink_on = on; + priv->blink_off = off; + } + return ret; +} + +static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + unsigned long on = 0; + + if (brightness > 0) + on = IWL_LED_SOLID; + + iwl_legacy_led_cmd(priv, on, 0); +} + +static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + + return iwl_legacy_led_cmd(priv, *delay_on, *delay_off); +} + +void iwl_legacy_leds_init(struct 
iwl_priv *priv) +{ + int mode = led_mode; + int ret; + + if (mode == IWL_LED_DEFAULT) + mode = priv->cfg->led_mode; + + priv->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(priv->hw->wiphy)); + priv->led.brightness_set = iwl_legacy_led_brightness_set; + priv->led.blink_set = iwl_legacy_led_blink_set; + priv->led.max_brightness = 1; + + switch (mode) { + case IWL_LED_DEFAULT: + WARN_ON(1); + break; + case IWL_LED_BLINK: + priv->led.default_trigger = + ieee80211_create_tpt_led_trigger(priv->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + iwl_blink, ARRAY_SIZE(iwl_blink)); + break; + case IWL_LED_RF_STATE: + priv->led.default_trigger = + ieee80211_get_radio_led_name(priv->hw); + break; + } + + ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); + if (ret) { + kfree(priv->led.name); + return; + } + + priv->led_registered = true; +} +EXPORT_SYMBOL(iwl_legacy_leds_init); + +void iwl_legacy_leds_exit(struct iwl_priv *priv) +{ + if (!priv->led_registered) + return; + + led_classdev_unregister(&priv->led); + kfree(priv->led.name); +} +EXPORT_SYMBOL(iwl_legacy_leds_exit); diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h new file mode 100644 index 000000000000..f0791f70f79d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-led.h @@ -0,0 +1,56 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_leds_h__ +#define __iwl_legacy_leds_h__ + + +struct iwl_priv; + +#define IWL_LED_SOLID 11 +#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IWL_LED_ACTIVITY (0<<1) +#define IWL_LED_LINK (1<<1) + +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, +}; + +void iwl_legacy_leds_init(struct iwl_priv *priv); +void iwl_legacy_leds_exit(struct iwl_priv *priv); + +#endif /* __iwl_legacy_leds_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h new file mode 100644 index 000000000000..f66a1b2c0397 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h @@ -0,0 +1,456 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_rs_h__ +#define __iwl_legacy_rs_h__ + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +struct iwl3945_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ + u8 table_rs_index; /* index in rate scale table cmd */ + u8 prev_table_rs; /* prev in rate table cmd */ +}; + + +/* + * These serve as indexes into + * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ + IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 0xf, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_CCK_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) +#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 
10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 + +/* possible actions when in mimo mode */ +#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 + +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) + +extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo(tbl) (is_mimo2(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_C BIT(2) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_AB | ANT_C) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximun number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. 
*/ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that has + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; +}; + +static inline u8 iwl4965_num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +static inline u8 iwl4965_first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + + +/** + * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific throughput table used is based on the type of network + * the associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/* Initialize station's rate scaling information after adding station */ +extern void iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); +extern void iwl3945_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. 
The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern int iwl4965_rate_control_register(void); +extern int iwl3945_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void iwl4965_rate_control_unregister(void); +extern void iwl3945_rate_control_unregister(void); + +#endif /* __iwl_legacy_rs__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c new file mode 100644 index 000000000000..903ef0d6d6cb --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-power.c @@ -0,0 +1,165 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-commands.h" +#include "iwl-debug.h" +#include "iwl-power.h" + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * This defines the old power levels. 
They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); +} + +static int +iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); + IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", + le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(priv, + "Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD, + sizeof(struct iwl_powertable_cmd), cmd); +} + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force) +{ + int ret; + bool update_chains; + + lockdep_assert_held(&priv->mutex); + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || + priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; + + if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) + return 0; + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete use sleep_power_next, need to be updated */ + memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); + if (test_bit(STATUS_SCANNING, &priv->status) && !force) { + IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); + return 0; + } + + if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) + set_bit(STATUS_POWER_PMI, &priv->status); + + ret = iwl_legacy_set_power(priv, cmd); + if (!ret) { + if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + clear_bit(STATUS_POWER_PMI, &priv->status); + + if (priv->cfg->ops->lib->update_chain_flags && update_chains) + priv->cfg->ops->lib->update_chain_flags(priv); + else if (priv->cfg->ops->lib->update_chain_flags) + IWL_DEBUG_POWER(priv, + "Cannot update the power, chain noise " + "calibration running: %d\n", + priv->chain_noise_data.state); + + memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); + } else + IWL_ERR(priv, "set power fail, ret = %d", ret); + + return ret; +} + +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force) +{ + struct iwl_powertable_cmd cmd; + + iwl_legacy_power_sleep_cam_cmd(priv, &cmd); + return iwl_legacy_power_set_mode(priv, &cmd, force); +} +EXPORT_SYMBOL(iwl_legacy_power_update_mode); + +/* initialize to default */ +void iwl_legacy_power_initialize(struct iwl_priv *priv) +{ + u16 lctl = iwl_legacy_pcie_link_ctl(priv); + + priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + + priv->power_data.debug_sleep_level_override = -1; + + memset(&priv->power_data.sleep_cmd, 0, + sizeof(priv->power_data.sleep_cmd)); +} +EXPORT_SYMBOL(iwl_legacy_power_initialize); diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h new file mode 100644 index 000000000000..d30b36acdc4a --- /dev/null +++ 
b/drivers/net/wireless/iwlegacy/iwl-power.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_legacy_power_setting_h__ +#define __iwl_legacy_power_setting_h__ + +#include "iwl-commands.h" + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + struct iwl_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool pci_pm; +}; + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force); +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force); +void iwl_legacy_power_initialize(struct iwl_priv *priv); + +#endif /* __iwl_legacy_power_setting_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h new file mode 100644 index 000000000000..30a493003ab0 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-prph.h @@ -0,0 +1,523 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_prph_h__ +#define __iwl_legacy_prph_h__ + +/* + * Registers in this file are internal, not PCI bus memory mapped. + * Driver accesses these via HBUS_TARG_PRPH_* registers. + */ +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) +#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) +#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) + +#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + +#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) + +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) + +/** + * BSM (Bootstrap State Machine) + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down when the embedded control + * processor is sleeping (e.g. for periodic power-saving shutdowns of radio). 
+ * + * When powering back up after sleeps (or during initial uCode load), the BSM + * internally loads the short bootstrap program from the special SRAM into the + * embedded processor's instruction SRAM, and starts the processor so it runs + * the bootstrap program. + * + * This bootstrap program loads (via PCI busmaster DMA) instructions and data + * images for a uCode program from host DRAM locations. The host driver + * indicates DRAM locations and sizes for instruction and data images via the + * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, + * the new program starts automatically. + * + * The uCode used for open-source drivers includes two programs: + * + * 1) Initialization -- performs hardware calibration and sets up some + * internal data, then notifies host via "initialize alive" notification + * (struct iwl_init_alive_resp) that it has completed all of its work. + * After signal from host, it then loads and starts the runtime program. + * The initialization program must be used when initially setting up the + * NIC after loading the driver. + * + * 2) Runtime/Protocol -- performs all normal runtime operations. This + * notifies host via "alive" notification (struct iwl_alive_resp) that it + * is ready to be used. + * + * When initializing the NIC, the host driver does the following procedure: + * + * 1) Load bootstrap program (instructions only, no data image for bootstrap) + * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND + * + * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction + * images in host DRAM. + * + * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked: + * BSM_WR_MEM_SRC_REG = 0 + * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND + * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image + * + * 4) Load bootstrap into instruction SRAM: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START + * + * 5) Wait for load completion: + * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0 + * + * 6) Enable future boot loads whenever NIC's power management triggers it: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN + * + * 7) Start the NIC by removing all reset bits: + * CSR_RESET = 0 + * + * The bootstrap uCode (already in instruction SRAM) loads initialization + * uCode. Initialization uCode performs data initialization, sends + * "initialize alive" notification to host, and waits for a signal from + * host to load runtime code. + * + * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction + * images in host DRAM. The last register loaded must be the instruction + * byte count register ("1" in MSbit tells initialization uCode to load + * the runtime uCode): + * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD + * + * 5) Wait for "alive" notification, then issue normal runtime commands. + * + * Data caching during power-downs: + * + * Just before the embedded controller powers down (e.g for automatic + * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA) + * a current snapshot of the embedded processor's data SRAM into host DRAM. + * This caches the data while the embedded processor's memory is powered down. + * Location and size are controlled by BSM_DRAM_DATA_* registers. + * + * NOTE: Instruction SRAM does not need to be saved, since that doesn't + * change during operation; the original image (from uCode distribution + * file) can be used for reload. 
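The numbered bootstrap-load sequence above maps almost directly onto register writes. What follows is an illustrative sketch (not part of the patch itself) of steps 3 through 6, using the BSM_* register names defined just below; it assumes a periphery write/read helper of the shape iwl_legacy_write_prph(priv, reg, val) / iwl_legacy_read_prph(priv, reg) and a target-specific instruction-SRAM base passed in as rtc_inst_lower_bound, with error handling and timeouts simplified.

/* Illustrative sketch only, assumed helper names; timeouts simplified. */
static int example_bsm_load_bootstrap(struct iwl_priv *priv,
				      u32 rtc_inst_lower_bound,
				      u32 inst_dwords)
{
	int i;

	/* Step 3: copy from offset 0 of BSM SRAM into instruction SRAM */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, rtc_inst_lower_bound);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, inst_dwords);

	/* Step 4: start the copy */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Step 5: poll until the start bit clears (load complete) */
	for (i = 0; i < 100; i++) {
		if (!(iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG) &
		      BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* Step 6: re-arm the BSM so it reloads bootstrap after power-ups */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START_EN);
	return 0;
}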
+ * + * When powering back up, the BSM loads the bootstrap program. Bootstrap looks + * at the BSM_DRAM_* registers, which now point to the runtime instruction + * image and the cached (modified) runtime data (*not* the initialization + * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the + * uCode from where it left off before the power-down. + * + * NOTE: Initialization uCode does *not* run as part of the save/restore + * procedure. + * + * This save/restore method is mostly for autonomous power management during + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * RFKILL should use complete restarts (with total re-initialization) of uCode, + * allowing total shutdown (including BSM memory). + * + * Note that, during normal operation, the host DRAM that held the initial + * startup data for the runtime code is now being used as a backup data cache + * for modified data! If you need to completely re-initialize the NIC, make + * sure that you use the runtime data image from the uCode distribution file, + * not the modified/saved runtime data. You may want to store a separate + * "clean" runtime data image in DRAM to avoid disk reads of distribution file. + */ + +/* BSM bit fields */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ + +/* BSM addresses */ +#define BSM_BASE (PRPH_BASE + 0x3400) +#define BSM_END (PRPH_BASE + 0x3800) + +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ + +/* + * Pointers and size regs for bootstrap load and data SRAM save/restore. + * NOTE: 3945 pointers use bits 31:0 of DRAM address. + * 4965 pointers use bits 35:4 of DRAM address. + */ +#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090) +#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094) +#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098) +#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C) + +/* + * BSM special memory, stays powered on during power-save sleeps. + * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) + */ +#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) +#define BSM_SRAM_SIZE (1024) /* bytes */ + + +/* 3945 Tx scheduler registers */ +#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) +#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000) +#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004) +#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010) +#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014) +#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020) +#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) +#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) + +/** + * Tx Scheduler + * + * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs + * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in + * host DRAM. It steers each frame's Tx command (which contains the frame + * data) into one of up to 7 prioritized Tx DMA FIFO channels within the + * device. A queue maps to only one (selectable by driver) Tx DMA channel, + * but one DMA channel may take input from several queues. + * + * Tx DMA FIFOs have dedicated purposes. 
For 4965, they are used as follows + * (cf. default_queue_to_tx_fifo in iwl-4965.c): + * + * 0 -- EDCA BK (background) frames, lowest priority + * 1 -- EDCA BE (best effort) frames, normal priority + * 2 -- EDCA VI (video) frames, higher priority + * 3 -- EDCA VO (voice) and management frames, highest priority + * 4 -- Commands (e.g. RXON, etc.) + * 5 -- unused (HCCA) + * 6 -- unused (HCCA) + * 7 -- not used by driver (device-internal only) + * + * + * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. + * In addition, driver can map the remaining queues to Tx DMA/FIFO + * channels 0-3 to support 11n aggregation via EDCA DMA channels. + * + * The driver sets up each queue to work in one of two modes: + * + * 1) Scheduler-Ack, in which the scheduler automatically supports a + * block-ack (BA) window of up to 64 TFDs. In this mode, each queue + * contains TFDs for a unique combination of Recipient Address (RA) + * and Traffic Identifier (TID), that is, traffic of a given + * Quality-Of-Service (QOS) priority, destined for a single station. + * + * In scheduler-ack mode, the scheduler keeps track of the Tx status of + * each frame within the BA window, including whether it's been transmitted, + * and whether it's been acknowledged by the receiving station. The device + * automatically processes block-acks received from the receiving STA, + * and reschedules un-acked frames to be retransmitted (successful + * Tx completion may end up being out-of-order). + * + * The driver must maintain the queue's Byte Count table in host DRAM + * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. + * This mode does not support fragmentation. + * + * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. + * The device may automatically retry Tx, but will retry only one frame + * at a time, until receiving ACK from receiving station, or reaching + * retry limit and giving up. + * + * The command queue (#4/#9) must use this mode! + * This mode does not require use of the Byte Count table in host DRAM. + * + * Driver controls scheduler operation via 3 means: + * 1) Scheduler registers + * 2) Shared scheduler database in internal 4965 SRAM + * 3) Shared data in host DRAM + * + * Initialization: + * + * When loading, driver should allocate memory for: + * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. + * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory + * (1024 bytes for each queue). + * + * After receiving "Alive" response from uCode, driver must initialize + * the scheduler (especially for queue #4/#9, the command queue, otherwise + * the driver can't issue commands!): + */ + +/** + * Max Tx window size is the max number of contiguous TFDs that the scheduler + * can keep track of at one time when creating block-ack chains of frames. + * Note that "64" matches the number of ack bits in a block-ack packet. + * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize + * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. + */ +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ +#define IWL49_SCD_START_OFFSET 0xa02c00 + +/* + * 4965 tells driver SRAM address for internal scheduler structs via this reg. + * Value is valid only after "Alive" response from uCode.
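Because the SCD registers and scheduler SRAM live in periphery space, the driver reaches them indirectly (via the HBUS target registers) rather than by plain MMIO. A minimal sketch (not part of the patch itself) of caching the scheduler SRAM base after the "Alive" notification, assuming iwl_legacy_read_prph() as the periphery-read helper and a priv->scd_base_addr field to hold the result:

/* Illustrative sketch only; helper and field names are assumptions. */
static void example_cache_scd_base(struct iwl_priv *priv)
{
	/* Where 4965 placed its scheduler structures in SRAM; the value
	 * is meaningful only after the "Alive" response. */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
						   IWL49_SCD_SRAM_BASE_ADDR);

	/* All IWL49_SCD_*_OFFSET values below are added to this base when
	 * the context, Tx-status and translate areas are cleared/set up. */
}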
+ */ +#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) + +/* + * Driver may need to update queue-empty bits after changing queue's + * write and read pointers (indexes) during (re-)initialization (i.e. when + * scheduler is not tracking what's happening). + * Bit fields: + * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit + * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty + * NOTE: This register is not used by Linux driver. + */ +#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) + +/* + * Physical base address of array of byte count (BC) circular buffers (CBs). + * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode. + * This register points to BC CB for queue 0, must be on 1024-byte boundary. + * Others are spaced by 1024 bytes. + * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad. + * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). + * Bit fields: + * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. + */ +#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) + +/* + * Enables any/all Tx DMA/FIFO channels. + * Scheduler generates requests for only the active channels. + * Set this to 0xff to enable all 8 channels (normal usage). + * Bit fields: + * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 + */ +#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) +/* + * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. + * Initialized and updated by driver as new TFDs are added to queue. + * NOTE: If using Block Ack, index must correspond to frame's + * Start Sequence Number; index = (SSN & 0xff) + * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? + */ +#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) + +/* + * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. + * For FIFO mode, index indicates next frame to transmit. + * For Scheduler-ACK mode, index indicates first frame in Tx window. + * Initialized by driver, updated by scheduler. + */ +#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) + +/* + * Select which queues work in chain mode (1) vs. not (0). + * Use chain mode to build chains of aggregated frames. + * Bit fields: + * 31-16: Reserved + * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time + * NOTE: If driver sets up queue for chain mode, it should also be set up + * in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x). + */ +#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) + +/* + * Select which queues interrupt driver when scheduler increments + * a queue's read pointer (index). + * Bit fields: + * 31-16: Reserved + * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled + * NOTE: This functionality is apparently a no-op; driver relies on interrupts + * from Rx queue to read Tx command responses and update Tx queues. + */ +#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) + +/* + * Queue search status registers. One for each queue. + * Sets up queue mode and assigns queue to Tx DMA channel. + * Bit fields: + * 19-10: Write mask/enable bits for bits 0-9 + * 9: Driver should init to "0" + * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0). + * Driver should init to "1" for aggregation mode, or "0" otherwise.
+ * 7-6: Driver should init to "0" + * 5: Window Size Left; indicates whether scheduler can request + * another TFD, based on window size, etc. Driver should init + * this bit to "1" for aggregation mode, or "0" for non-agg. + * 4-1: Tx FIFO to use (range 0-7). + * 0: Queue is active (1), not active (0). + * Other bits should be written as "0" + * + * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled + * via SCD_QUEUECHAIN_SEL. + */ +#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ + (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) + +/* Bit field positions */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) +#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) + +/* Write masks */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) + +/** + * 4965 internal SRAM structures for scheduler, shared with driver ... + * + * Driver should clear and initialize the following areas after receiving + * "Alive" response from 4965 uCode, i.e. after initial + * uCode load, or after a uCode load done for error recovery: + * + * SCD_CONTEXT_DATA_OFFSET (size 128 bytes) + * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes) + * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes) + * + * Driver accesses SRAM via HBUS_TARG_MEM_* registers. + * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR. + * All OFFSET values must be added to this base address. + */ + +/* + * Queue context. One 8-byte entry for each of 16 queues. + * + * Driver should clear this entire area (size 0x80) to 0 after receiving + * "Alive" notification from uCode. Additionally, driver should init + * each queue's entry as follows: + * + * LS Dword bit fields: + * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. + * + * MS Dword bit fields: + * 16-22: Frame limit. Driver should init to 10 (0xa). + * + * Driver should init all other bits to 0. + * + * Init must be done after driver receives "Alive" response from 4965 uCode, + * and when setting up queue for aggregation. + */ +#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 +#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ + (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) + +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +/* + * Tx Status Bitmap + * + * Driver should clear this entire area (size 0x100) to 0 after receiving + * "Alive" notification from uCode. Area is used only by device itself; + * no other support (besides clearing) is required from driver. + */ +#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 + +/* + * RAxTID to queue translation mapping. + * + * When queue is in Scheduler-ACK mode, frames placed in a that queue must be + * for only one combination of receiver address (RA) and traffic ID (TID), i.e. + * one QOS priority level destined for one station (for this wireless link, + * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit + * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK + * mode, the device ignores the mapping value. 
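As a concrete illustration of the context layout above, here is a short sketch (not part of the patch itself) of initializing one queue's 8-byte context entry in scheduler SRAM, assuming the SRAM base was cached into priv->scd_base_addr as in the earlier sketch and that a target-memory write helper of the form iwl_legacy_write_targ_mem(priv, addr, val) is available:

/* Illustrative sketch only: program window size = 64 and frame limit = 10
 * (0xa) for Tx queue txq_id, as the comment above prescribes. */
static void example_init_queue_context(struct iwl_priv *priv, int txq_id)
{
	u32 addr = priv->scd_base_addr +
		   IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

	/* LS dword: bits 0-6 hold the max Tx window size */
	iwl_legacy_write_targ_mem(priv, addr,
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	/* MS dword: bits 16-22 hold the frame limit */
	iwl_legacy_write_targ_mem(priv, addr + sizeof(u32),
		(10 << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}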
+ * + * Bit fields, for each 16-bit map: + * 15-9: Reserved, set to 0 + * 8-4: Index into device's station table for recipient station + * 3-0: Traffic ID (tid), range 0-15 + * + * Driver should clear this entire area (size 32 bytes) to 0 after receiving + * "Alive" notification from uCode. To update a 16-bit map value, driver + * must read a dword-aligned value from device SRAM, replace the 16-bit map + * value of interest, and write the dword value back into device SRAM. + */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 + +/* Find translation table dword to read/write for given queue */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) + +#define IWL_SCD_TXFIFO_POS_TID (0) +#define IWL_SCD_TXFIFO_POS_RA (4) +#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +/*********************** END TX SCHEDULER *************************************/ + +#endif /* __iwl_legacy_prph_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c new file mode 100644 index 000000000000..654cf233a384 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-rx.c @@ -0,0 +1,302 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. 
+ * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_legacy_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_legacy_rx_queue_space - Return number of free slots available in queue. 
+ */ +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_space); + +/** + * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void +iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q) +{ + unsigned long flags; + u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + /* If power-saving is in use, make sure device is awake */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Rx queue requesting wakeup," + " GP1 = 0x%x\n", reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + } + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr); + +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct device *dev = &priv->pci_dev->dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, + GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; + +err_rb: + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); +err_bd: + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc); + + +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG_11H(priv, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +} +EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif); + +void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + if (iwl_legacy_is_any_associated(priv)) { + if (priv->cfg->ops->lib->check_plcp_health) { + if (!priv->cfg->ops->lib->check_plcp_health( + priv, pkt)) { + /* + * high plcp error detected + * reset Radio + */ + 
iwl_legacy_force_reset(priv, + IWL_RF_RESET, false); + } + } + } +} +EXPORT_SYMBOL(iwl_legacy_recover_from_statistics); + +/* + * returns non-zero if packet should be dropped + */ +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + /* + * All contexts have the same setting here due to it being + * a module parameter, so OK to check any context. + */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & + RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. */ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + IWL_DEBUG_RX(priv, "Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag); diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c new file mode 100644 index 000000000000..842f0b46b6df --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-scan.c @@ -0,0 +1,625 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/etherdevice.h> +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. 
*/ +#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (20) + +#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static int iwl_legacy_send_scan_abort(struct iwl_priv *priv) +{ + int ret; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .flags = CMD_WANT_SKB, + }; + + /* Exit instantly with error when device is not ready + * to receive scan abort command or is not currently + * performing a hardware scan */ + if (!test_bit(STATUS_READY, &priv->status) || + !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || + !test_bit(STATUS_SCAN_HW, &priv->status) || + test_bit(STATUS_FW_ERROR, &priv->status) || + test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EIO; + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status); + ret = -EIO; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + return ret; +} + +static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted) +{ + /* check if scan was requested from mac80211 */ + if (priv->scan_request) { + IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); + ieee80211_scan_completed(priv->hw, aborted); + } + + priv->is_internal_short_scan = false; + priv->scan_vif = NULL; + priv->scan_request = NULL; +} + +void iwl_legacy_force_scan_end(struct iwl_priv *priv) +{ + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); + return; + } + + IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); + clear_bit(STATUS_SCANNING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_legacy_complete_scan(priv, true); +} + +static void iwl_legacy_do_scan_abort(struct iwl_priv *priv) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); + return; + } + + if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); + return; + } + + ret = iwl_legacy_send_scan_abort(priv); + if (ret) { + IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); + iwl_legacy_force_scan_end(priv); + } else + IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n"); +} + +/** + * iwl_legacy_scan_cancel - Cancel any currently executing HW scan + */ +int iwl_legacy_scan_cancel(struct iwl_priv *priv) +{ + IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); + queue_work(priv->workqueue, &priv->abort_scan); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel); + +/** + * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to
wait (in milliseconds) for scan to abort + * + */ +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(ms); + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); + + iwl_legacy_do_scan_abort(priv); + + while (time_before_eq(jiffies, timeout)) { + if (!test_bit(STATUS_SCAN_HW, &priv->status)) + break; + msleep(20); + } + + return test_bit(STATUS_SCAN_HW, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout); + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN(priv, "Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); +#endif +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; +#endif + + IWL_DEBUG_SCAN(priv, + "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", + (priv->scan_band == IEEE80211_BAND_2GHZ) ? 
"2.4" : "5.2", + jiffies_to_msecs(jiffies - priv->scan_start)); + + queue_work(priv->workqueue, &priv->scan_completed); +} + +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv) +{ + /* scan handlers */ + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = + iwl_legacy_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_legacy_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_legacy_rx_scan_complete_notif; +} +EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers); + +inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IWL_ACTIVE_DWELL_TIME_52 + + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IWL_ACTIVE_DWELL_TIME_24 + + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} +EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time); + +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx; + u16 passive = (band == IEEE80211_BAND_2GHZ) ? + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_legacy_is_any_associated(priv)) { + /* + * If we're associated, we clamp the maximum passive + * dwell time to be 98% of the smallest beacon interval + * (minus 2 * channel tune time) + */ + for_each_context(priv, ctx) { + u16 value; + + if (!iwl_legacy_is_associated_ctx(ctx)) + continue; + value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0; + if ((value > IWL_PASSIVE_DWELL_BASE) || !value) + value = IWL_PASSIVE_DWELL_BASE; + value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + passive = min(value, passive); + } + } + + return passive; +} +EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time); + +void iwl_legacy_init_scan_params(struct iwl_priv *priv) +{ + u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; + if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} +EXPORT_SYMBOL(iwl_legacy_init_scan_params); + +static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool internal, + enum ieee80211_band band) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (WARN_ON(!priv->cfg->ops->utils->request_scan)) + return -EOPNOTSUPP; + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Request scan called when driver not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_SCAN(priv, + "Multiple concurrent scan requests in parallel.\n"); + return -EBUSY; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); + return -EBUSY; + } + + IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", + internal ? 
"internal short " : ""); + + set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = internal; + priv->scan_start = jiffies; + priv->scan_band = band; + + ret = priv->cfg->ops->utils->request_scan(priv, vif); + if (ret) { + clear_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = false; + return ret; + } + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + return 0; +} + +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_SCANNING, &priv->status) && + !priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + ret = -EAGAIN; + goto out_unlock; + } + + /* mac80211 will only ask for one band at a time */ + priv->scan_request = req; + priv->scan_vif = vif; + + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. + */ + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); + ret = 0; + } else + ret = iwl_legacy_scan_initiate(priv, vif, false, + req->channels[0]->band); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +out_unlock: + mutex_unlock(&priv->mutex); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_hw_scan); + +/* + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ +void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv) +{ + queue_work(priv->workqueue, &priv->start_internal_scan); +} + +static void iwl_legacy_bg_start_internal_scan(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, start_internal_scan); + + IWL_DEBUG_SCAN(priv, "Start internal scan\n"); + + mutex_lock(&priv->mutex); + + if (priv->is_internal_short_scan == true) { + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); + goto unlock; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + goto unlock; + } + + if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band)) + IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); + unlock: + mutex_unlock(&priv->mutex); +} + +static void iwl_legacy_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + IWL_DEBUG_SCAN(priv, "Scan check work\n"); + + /* Since we are here firmware does not finish scan and + * most likely is in bad shape, so we don't bother to + * send abort command, just force scan complete to mac80211 */ + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request + */ + +u16 +iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ies, int ie_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, iwl_bcast_addr, ETH_ALEN); + memcpy(frame->sa, ta, ETH_ALEN); + memcpy(frame->bssid, iwl_bcast_addr, ETH_ALEN); + frame->seq_ctrl = 0; + + 
len += 24; + + /* ...next IE... */ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our indirect SSID IE */ + left -= 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + len += 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ies && ie_len) { + memcpy(pos, ies, ie_len); + len += ie_len; + } + + return (u16)len; +} +EXPORT_SYMBOL(iwl_legacy_fill_probe_req); + +static void iwl_legacy_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); + + IWL_DEBUG_SCAN(priv, "Abort scan work\n"); + + /* We keep scan_check work queued in case when firmware will not + * report back scan completed notification */ + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 200); + mutex_unlock(&priv->mutex); +} + +static void iwl_legacy_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + bool aborted; + + IWL_DEBUG_SCAN(priv, "Completed %sscan.\n", + priv->is_internal_short_scan ? "internal short " : ""); + + cancel_delayed_work(&priv->scan_check); + + mutex_lock(&priv->mutex); + + aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); + if (aborted) + IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); + + if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); + goto out_settings; + } + + if (priv->is_internal_short_scan && !aborted) { + int err; + + /* Check if mac80211 requested scan during our internal scan */ + if (priv->scan_request == NULL) + goto out_complete; + + /* If so request a new scan */ + err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false, + priv->scan_request->channels[0]->band); + if (err) { + IWL_DEBUG_SCAN(priv, + "failed to initiate pending scan: %d\n", err); + aborted = true; + goto out_complete; + } + + goto out; + } + +out_complete: + iwl_legacy_complete_scan(priv, aborted); + +out_settings: + /* Can we still talk to firmware ? */ + if (!iwl_legacy_is_ready_rf(priv)) + goto out; + + /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. 
+ */ + iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, + false); + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + + priv->cfg->ops->utils->post_scan(priv); + +out: + mutex_unlock(&priv->mutex); +} + +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed); + INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan); + INIT_WORK(&priv->start_internal_scan, + iwl_legacy_bg_start_internal_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check); +} +EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work); + +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->start_internal_scan); + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->scan_completed); + + if (cancel_delayed_work_sync(&priv->scan_check)) { + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work); diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h new file mode 100644 index 000000000000..9f70a4723103 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h @@ -0,0 +1,92 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_spectrum_h__ +#define __iwl_legacy_spectrum_h__ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __packed; + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __packed; + +struct ieee80211_info_element { + u8 id; + u8 len; + u8 data[0]; +} __packed; + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __packed; + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c new file mode 100644 index 000000000000..47c9da3834ea --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-sta.c @@ -0,0 +1,816 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/lockdep.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" + +/* priv->sta_lock must be held */ +static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) +{ + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) + IWL_ERR(priv, + "ACTIVATE a non DRIVER active station id %u addr %pM\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + + if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_ASSOC(priv, + "STA id %u addr %pM already present" + " in uCode (according to driver)\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } else { + priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } +} + +static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *addsta, + struct iwl_rx_packet *pkt, + bool sync) +{ + u8 sta_id = addsta->sta.sta_id; + unsigned long flags; + int ret = -EIO; + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", + pkt->hdr.flags); + return ret; + } + + IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", + sta_id); + + spin_lock_irqsave(&priv->sta_lock, flags); + + switch (pkt->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); + iwl_legacy_sta_ucode_activate(priv, sta_id); + ret = 0; + break; + case ADD_STA_NO_ROOM_IN_TABLE: + IWL_ERR(priv, "Adding station %d failed, no room in table.\n", + sta_id); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IWL_ERR(priv, + "Adding station %d failed, no block ack resource.\n", + sta_id); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IWL_ERR(priv, "Attempting to modify non-existing station %d\n", + sta_id); + break; + default: + IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", + pkt->u.add_sta.status); + break; + } + + IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", + sta_id, priv->stations[sta_id].sta.sta.addr); + + /* + * XXX: The MAC address in the command buffer is often changed from + * the original sent to the device. That is, the MAC address + * written to the command buffer often is not the same MAC adress + * read from the command buffer when the command returns. This + * issue has not yet been resolved and this debugging is left to + * observe the problem. + */ + IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", + addsta->sta.addr); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + struct iwl_legacy_addsta_cmd *addsta = + (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; + + iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); + +} + +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *pkt = NULL; + int ret = 0; + u8 data[sizeof(*sta)]; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .flags = flags, + .data = data, + }; + u8 sta_id __maybe_unused = sta->sta.sta_id; + + IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", + sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); + + if (flags & CMD_ASYNC) + cmd.callback = iwl_legacy_add_sta_callback; + else { + cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } + + cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + if (ret == 0) { + pkt = (struct iwl_rx_packet *)cmd.reply_page; + ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_add_sta); + +static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, + struct ieee80211_sta *sta, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + __le32 sta_flags; + u8 mimo_ps_mode; + + if (!sta || !sta_ht_inf->ht_supported) + goto done; + + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; + IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? + "static" : + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
+ "dynamic" : "disabled"); + + sta_flags = priv->stations[index].sta.station_flags; + + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); + + switch (mimo_ps_mode) { + case WLAN_HT_CAP_SM_PS_STATIC: + sta_flags |= STA_FLG_MIMO_DIS_MSK; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + break; + default: + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); + break; + } + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + sta_flags |= STA_FLG_HT40_EN_MSK; + else + sta_flags &= ~STA_FLG_HT40_EN_MSK; + + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +/** + * iwl_legacy_prep_station - Prepare station information for addition + * + * should be called with sta_lock held + */ +u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta) +{ + struct iwl_station_entry *station; + int i; + u8 sta_id = IWL_INVALID_STATION; + u16 rate; + + if (is_ap) + sta_id = ctx->ap_sta_id; + else if (is_broadcast_ether_addr(addr)) + sta_id = ctx->bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (!priv->stations[i].used && + sta_id == IWL_INVALID_STATION) + sta_id = i; + } + + /* + * These two conditions have the same outcome, but keep them + * separate + */ + if (unlikely(sta_id == IWL_INVALID_STATION)) + return sta_id; + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + return sta_id; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && + !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + return sta_id; + } + + station = &priv->stations[sta_id]; + station->used = IWL_STA_DRIVER_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", + sta_id, addr); + priv->num_stations++; + + /* Set up the REPLY_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = ctx->station_flags; + station->ctxid = ctx->ctxid; + + if (sta) { + struct iwl_station_priv_common *sta_priv; + + sta_priv = (void *)sta->drv_priv; + sta_priv->ctx = ctx; + } + + /* + * OK to call unconditionally, since local stations (IBSS BSSID + * STA and broadcast STA) pass in a NULL sta, and mac80211 + * doesn't allow HT IBSS. + */ + iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); + + /* 3945 only */ + rate = (priv->band == IEEE80211_BAND_5GHZ) ? + IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; + /* Turn on both antennas for the station... 
*/ + station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); + + return sta_id; + +} +EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); + +#define STA_WAIT_TIMEOUT (HZ/2) + +/** + * iwl_legacy_add_station_common - + */ +int +iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r) +{ + unsigned long flags_spin; + int ret = 0; + u8 sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + *sta_id_r = 0; + spin_lock_irqsave(&priv->sta_lock, flags_spin); + sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare station %pM for addition\n", + addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + /* Add station to device's station table */ + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[sta_id].sta.sta.addr); + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + *sta_id_r = sta_id; + return ret; +} +EXPORT_SYMBOL(iwl_legacy_add_station_common); + +/** + * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station + * + * priv->sta_lock must be held + */ +static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) +{ + /* Ucode must be active and driver must be non active */ + if ((priv->stations[sta_id].used & + (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != + IWL_STA_UCODE_ACTIVE) + IWL_ERR(priv, "removed non active STA %u\n", sta_id); + + priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; + + memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); + IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); +} + +static int iwl_legacy_send_remove_station(struct iwl_priv *priv, + const u8 *addr, int sta_id, + bool temporary) +{ + struct iwl_rx_packet *pkt; + int ret; + + unsigned long flags_spin; + struct iwl_rem_sta_cmd rm_sta_cmd; + + struct iwl_host_cmd cmd = { + .id = REPLY_REMOVE_STA, + .len = sizeof(struct iwl_rem_sta_cmd), + .flags = CMD_SYNC, + .data = &rm_sta_cmd, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); + + cmd.flags |= CMD_WANT_SKB; + + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from 
REPLY_REMOVE_STA (0x%08X)\n", + pkt->hdr.flags); + ret = -EIO; + } + + if (!ret) { + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + iwl_legacy_sta_ucode_deactivate(priv, sta_id); + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} + +/** + * iwl_legacy_remove_station - Remove driver's knowledge of station. + */ +int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr) +{ + unsigned long flags; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Unable to remove station %pM, device not ready.\n", + addr); + /* + * It is typical for stations to be removed when we are + * going down. Return success since device will be down + * soon anyway + */ + return 0; + } + + IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", + sta_id, addr); + + if (WARN_ON(sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", + addr); + goto out_err; + } + + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", + addr); + goto out_err; + } + + if (priv->stations[sta_id].used & IWL_STA_LOCAL) { + kfree(priv->stations[sta_id].lq); + priv->stations[sta_id].lq = NULL; + } + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + + priv->num_stations--; + + BUG_ON(priv->num_stations < 0); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_remove_station(priv, addr, sta_id, false); +out_err: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); + +/** + * iwl_legacy_clear_ucode_stations - clear ucode station table bits + * + * This function clears all the bits in the driver indicating + * which stations are active in the ucode. Call when something + * other than explicit station management would cause this in + * the ucode, e.g. unassociated RXON. + */ +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int i; + unsigned long flags_spin; + bool cleared = false; + + IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx && ctx->ctxid != priv->stations[i].ctxid) + continue; + + if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_INFO(priv, + "Clearing ucode active for station %d\n", i); + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + cleared = true; + } + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + if (!cleared) + IWL_DEBUG_INFO(priv, + "No active stations found to be cleared\n"); +} +EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); + +/** + * iwl_legacy_restore_stations() - Restore driver known stations to device + * + * All stations considered active by driver, but not present in ucode, is + * restored. + * + * Function sleeps. 
+ */ +void +iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_addsta_cmd sta_cmd; + struct iwl_link_quality_cmd lq; + unsigned long flags_spin; + int i; + bool found = false; + int ret; + bool send_lq; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Not ready yet, not restoring any stations.\n"); + return; + } + + IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx->ctxid != priv->stations[i].ctxid) + continue; + if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && + !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].sta.mode = 0; + priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; + found = true; + } + } + + for (i = 0; i < priv->hw_params.max_stations; i++) { + if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &priv->stations[i].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + send_lq = false; + if (priv->stations[i].lq) { + memcpy(&lq, priv->stations[i].lq, + sizeof(struct iwl_link_quality_cmd)); + send_lq = true; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].used &= + ~IWL_STA_DRIVER_ACTIVE; + priv->stations[i].used &= + ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + /* + * Rate scaling has already been initialized, send + * current LQ command + */ + if (send_lq) + iwl_legacy_send_lq_cmd(priv, ctx, &lq, + CMD_SYNC, true); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; + } + } + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + if (!found) + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... no stations to be restored.\n"); + else + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... 
complete.\n"); +} +EXPORT_SYMBOL(iwl_legacy_restore_stations); + +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < priv->sta_key_max_num; i++) + if (!test_and_set_bit(i, &priv->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} +EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); + +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (!(priv->stations[i].used & IWL_STA_BCAST)) + continue; + + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + priv->num_stations--; + BUG_ON(priv->num_stations < 0); + kfree(priv->stations[i].lq); + priv->stations[i].lq = NULL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); +} +EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ + int i; + IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ +} +#endif + +/** + * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity + * + * It sometimes happens when a HT rate has been in use and we + * loose connectivity with AP then mac80211 will first tell us that the + * current channel is not HT anymore before removing the station. In such a + * scenario the RXON flags will be updated to indicate we are not + * communicating HT anymore, but the LQ command may still contain HT rates. + * Test for this to prevent driver from sending LQ command between the time + * RXON flags are updated and when LQ command is updated. + */ +static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq) +{ + int i; + + if (ctx->ht.enabled) + return true; + + IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", + ctx->active.channel); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & + RATE_MCS_HT_MSK) { + IWL_DEBUG_INFO(priv, + "index %d of LQ expects HT channel\n", + i); + return false; + } + } + return true; +} + +/** + * iwl_legacy_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
+ */ +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, u8 flags, bool init) +{ + int ret = 0; + unsigned long flags_spin; + + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .flags = flags, + .data = lq, + }; + + if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_legacy_dump_lq_cmd(priv, lq); + BUG_ON(init && (cmd.flags & CMD_ASYNC)); + + if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) + ret = iwl_legacy_send_cmd(priv, &cmd); + else + ret = -EINVAL; + + if (cmd.flags & CMD_ASYNC) + return ret; + + if (init) { + IWL_DEBUG_INFO(priv, "init LQ command complete," + " clearing sta addition status for sta %d\n", + lq->sta_id); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); + +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", + sta->addr); + ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); + if (ret) + IWL_ERR(priv, "Error removing station %pM\n", + sta->addr); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h new file mode 100644 index 000000000000..67bd75fe01a1 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-sta.h @@ -0,0 +1,148 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_legacy_sta_h__ +#define __iwl_legacy_sta_h__ + +#include "iwl-dev.h" + +#define HW_KEY_DYNAMIC 0 +#define HW_KEY_DEFAULT 1 + +#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ +#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of + being activated */ +#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; + (this is for the IBSS BSSID stations) */ +#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ + + +void iwl_legacy_restore_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv); +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv); +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags); +int iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r); +int iwl_legacy_remove_station(struct iwl_priv *priv, + const u8 sta_id, + const u8 *addr); +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +u8 iwl_legacy_prep_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta); + +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, + u8 flags, bool init); + +/** + * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver + * @priv: iwl priv struct + * + * This is called during iwl_down() to make sure that in the case + * we're coming there from a hardware restart mac80211 will be + * able to reconfigure stations -- if we're getting there in the + * normal down flow then the stations will already be cleared. + */ +static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rxon_context *ctx; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(priv->stations, 0, sizeof(priv->stations)); + priv->num_stations = 0; + + priv->ucode_key_table = 0; + + for_each_context(priv, ctx) { + /* + * Remove all key information that is not stored as part + * of station information since mac80211 may not have had + * a chance to remove all the keys. When device is + * reconfigured by mac80211 after an error all keys will + * be reconfigured. + */ + memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); + ctx->key_mapping_keys = 0; + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta) +{ + if (WARN_ON(!sta)) + return IWL_INVALID_STATION; + + return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; +} + +/** + * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta + * @priv: iwl priv + * @context: the current context + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. 
+ */ +static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv, + struct iwl_rxon_context *context, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return context->bcast_sta_id; + + sta_id = iwl_legacy_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! + */ + WARN_ON(sta_id == IWL_INVALID_STATION); + + return sta_id; +} +#endif /* __iwl_legacy_sta_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c new file mode 100644 index 000000000000..7db8340d1c07 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c @@ -0,0 +1,637 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <net/mac80211.h> +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/** + * iwl_legacy_txq_update_write_ptr - Send new write index to hardware + */ +void +iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Tx queue %d requesting wakeup," + " GP1 = 0x%x\n", txq_id, reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return; + } + + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + + /* + * else not in power-save mode, + * uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). + */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + txq->need_update = 0; +} +EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr); + +/** + * iwl_legacy_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. 
+ * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct device *dev = &priv->pci_dev->dev; + int i; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->write_ptr != q->read_ptr; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * + txq->q.n_bd, txq->tfds, txq->q.dma_addr); + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_free); + +/** + * iwl_legacy_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + struct device *dev = &priv->pci_dev->dev; + int i; + bool huge = false; + + if (q->n_bd == 0) + return; + + for (; q->read_ptr != q->write_ptr; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + /* we have no way to tell if it is a huge cmd ATM */ + i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0); + + if (txq->meta[i].flags & CMD_SIZE_HUGE) { + huge = true; + continue; + } + + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + } + if (huge) { + i = q->n_window; + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + } + + /* De-alloc array of command/tx buffers */ + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_cmd_queue_free); + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. 
When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * See more detailed info in iwl-4965-hw.h. + ***************************************************/ + +int iwl_legacy_queue_space(const struct iwl_queue *q) +{ + int s = q->read_ptr - q->write_ptr; + + if (q->read_ptr > q->write_ptr) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_queue_space); + + +/** + * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap + * and iwl_legacy_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * iwl_legacy_get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; + + return 0; +} + +/** + * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue + */ +static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct device *dev = &priv->pci_dev->dev; + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + + /* Driver private data, only for Tx (not command) queues, + * not shared with device. */ + if (id != priv->cmd_queue) { + txq->txb = kzalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERR(priv, "kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, + GFP_KERNEL); + if (!txq->tfds) { + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = id; + + return 0; + + error: + kfree(txq->txb); + txq->txb = NULL; + + return -ENOMEM; +} + +/** + * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue + */ +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int i, len; + int ret; + int actual_slots = slots_num; + + /* + * Alloc buffer array for commands (Tx or other types of commands). + * For the command queue (#4/#9), allocate command space + one big + * command for scan, since scan command is very huge; the system will + * not have two scans at the same time, so only one is needed. + * For normal Tx queues (all other queues), no super-size command + * space is needed. 
+ */ + if (txq_id == priv->cmd_queue) + actual_slots++; + + txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, + GFP_KERNEL); + txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, + GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto out_free_arrays; + + len = sizeof(struct iwl_device_cmd); + for (i = 0; i < actual_slots; i++) { + /* only happens for cmd queue */ + if (i == slots_num) + len = IWL_MAX_CMD_SIZE; + + txq->cmd[i] = kmalloc(len, GFP_KERNEL); + if (!txq->cmd[i]) + goto err; + } + + /* Alloc driver data array and TFD circular buffer */ + ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id); + if (ret) + goto err; + + txq->need_update = 0; + + /* + * For the default queues 0-3, set up the swq_id + * already -- all others need to get one later + * (if they need one at all). + */ + if (txq_id < 4) + iwl_legacy_set_swq_id(txq, txq_id, txq_id); + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); + + return 0; +err: + for (i = 0; i < actual_slots; i++) + kfree(txq->cmd[i]); +out_free_arrays: + kfree(txq->meta); + kfree(txq->cmd); + + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_init); + +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int actual_slots = slots_num; + + if (txq_id == priv->cmd_queue) + actual_slots++; + + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); + + txq->need_update = 0; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_reset); + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/** + * iwl_legacy_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. + */ +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + unsigned long flags; + int len; + u32 idx; + u16 fix_size; + + cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); + fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries + * Also, check to see if command buffer should not exceed the size + * of device_cmd and max_cmd_size. */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->flags & CMD_SIZE_HUGE)); + BUG_ON(fix_size > IWL_MAX_CMD_SIZE); + + if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) { + IWL_WARN(priv, "Not sending command - %s KILL\n", + iwl_legacy_is_rfkill(priv) ? 
"RF" : "CT"); + return -EIO; + } + + if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERR(priv, "No space in command queue\n"); + IWL_ERR(priv, "Restarting adapter due to queue full\n"); + queue_work(priv->workqueue, &priv->restart); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + /* If this is a huge cmd, mark the huge flag also on the meta.flags + * of the _original_ cmd. This is used for DMA mapping clean up. + */ + if (cmd->flags & CMD_SIZE_HUGE) { + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); + txq->meta[idx].flags = CMD_SIZE_HUGE; + } + + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + out_meta->flags = cmd->flags; + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + if (cmd->flags & CMD_ASYNC) + out_meta->callback = cmd->callback; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + if (cmd->flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; + len = sizeof(struct iwl_device_cmd); + if (idx == TFD_CMD_SLOTS) + len = IWL_MAX_CMD_SIZE; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (out_cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, + "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + break; + default: + IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + } +#endif + txq->need_update = 1; + + if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); + + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, + fix_size, PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, phys_addr); + dma_unmap_len_set(out_meta, len, fix_size); + + trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr, + fix_size, cmd->flags); + + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, fix_size, 1, + U32_PAD(cmd->len)); + + /* Increment and update queue's write index */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return idx; +} + +/** + * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. 
+ */ +static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, + int idx, int cmd_idx) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + idx, q->n_bd, q->write_ptr, q->read_ptr); + return; + } + + for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + if (nfreed++ > 0) { + IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, + q->write_ptr, q->read_ptr); + queue_work(priv->workqueue, &priv->restart); + } + + } +} + +/** + * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void +iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN(txq_id != priv->cmd_queue, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, priv->cmd_queue, sequence, + priv->txq[priv->cmd_queue].q.read_ptr, + priv->txq[priv->cmd_queue].q.write_ptr)) { + iwl_print_hex_error(priv, pkt, 32); + return; + } + + /* If this is a huge cmd, clear the huge flag on the meta.flags + * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap + * the DMA buffer for the scan (huge) command. + */ + if (huge) { + cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0); + txq->meta[cmd_index].flags = 0; + } + cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge); + cmd = txq->cmd[cmd_index]; + meta = &txq->meta[cmd_index]; + + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), + PCI_DMA_BIDIRECTIONAL); + + /* Input error checking is done when commands are added to queue. */ + if (meta->flags & CMD_WANT_SKB) { + meta->source->reply_page = (unsigned long)rxb_addr(rxb); + rxb->page = NULL; + } else if (meta->callback) + meta->callback(priv, cmd, pkt); + + iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); + + if (!(meta->flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd)); + wake_up_interruptible(&priv->wait_command_queue); + } + meta->flags = 0; +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete); diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c new file mode 100644 index 000000000000..ef94d161b783 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -0,0 +1,4294 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. 
All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/ieee80211_radiotap.h> +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl3945" + +#include "iwl-fh.h" +#include "iwl-3945-fh.h" +#include "iwl-commands.h" +#include "iwl-sta.h" +#include "iwl-3945.h" +#include "iwl-core.h" +#include "iwl-helpers.h" +#include "iwl-dev.h" +#include "iwl-spectrum.h" + +/* + * module name, copyright, version, etc. + */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +/* + * add "s" to indicate spectrum measurement included. + * we add it here to be consistent with previous releases in which + * this was configurable. 
+ */ +#define DRV_VERSION IWLWIFI_VERSION VD "s" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + + /* module parameters */ +struct iwl_mod_params iwl3945_mod_params = { + .sw_crypto = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +/** + * iwl3945_get_antenna_flags - Get antenna flags for RXON command + * @priv: eeprom and antenna fields are used to determine antenna flags + * + * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed + * iwl3945_mod_params.antenna specifies the antenna diversity mode: + * + * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself + * IWL_ANTENNA_MAIN - Force MAIN antenna + * IWL_ANTENNA_AUX - Force AUX antenna + */ +__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) +{ + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + switch (iwl3945_mod_params.antenna) { + case IWL_ANTENNA_DIVERSITY: + return 0; + + case IWL_ANTENNA_MAIN: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + + case IWL_ANTENNA_AUX: + if (eeprom->antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", + iwl3945_mod_params.antenna); + + return 0; /* "diversity" is default if error */ +} + +static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + int ret; + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + + if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = keyconf->keyidx; + key_flags &= ~STA_KEY_FLG_INVALID; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocated room + * in uCode. 
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); + + ret = iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + return -EOPNOTSUPP; +} + +static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + struct iwl_legacy_addsta_cmd sta_cmd; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl3945_set_dynamic_key(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret = 0; + + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); + break; + default: + IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +static int iwl3945_remove_static_key(struct iwl_priv *priv) +{ + int ret = -EOPNOTSUPP; + + return ret; +} + +static int iwl3945_set_static_key(struct iwl_priv *priv, + struct ieee80211_key_conf *key) +{ + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) + return -EOPNOTSUPP; + + IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher); + return -EINVAL; +} + +static void iwl3945_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl3945_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl3945_frame, list); +} + +static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + + if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) + return 0; + + if (priv->beacon_skb->len > left) + return 0; + + memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); + + return priv->beacon_skb->len; +} + +static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl3945_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl3945_get_free_frame(priv); + + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + rate = iwl_legacy_get_lowest_plcp(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + + frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl3945_free_frame(priv, frame); + + return rc; +} + +static void iwl3945_unset_hw_params(struct iwl_priv *priv) +{ + if (priv->_3945.shared_virt) + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct iwl3945_shared), + priv->_3945.shared_virt, + priv->_3945.shared_phys); +} + +static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_device_cmd *cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; + + tx_cmd->sec_ctl = 0; + + switch (keyinfo->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | + (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", info->control.hw_key->hw_key_idx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
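+ *
+ * A rough illustration of the flag handling below: a unicast QoS data
+ * frame gets TX_CMD_FLG_ACK_MSK, has its TID copied into tid_tspec and
+ * TX_CMD_FLG_SEQ_CTL_MSK cleared, while a (re)association request is a
+ * management frame and additionally gets pm_frame_timeout set to 3.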
+ */ +static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, u8 std_id) +{ + struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; + __le32 tx_flags = tx_cmd->tx_flags; + __le16 fc = hdr->frame_control; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +/* + * start REPLY_TX command process + */ +static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl3945_tx_cmd *tx_cmd; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + int txq_id = skb_get_queue_mapping(skb); + u16 len, idx, hdr_len; + u8 id; + u8 unicast; + u8 sta_id; + u8 tid = 0; + __le16 fc; + u8 wait_write_ptr = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { + IWL_ERR(priv, "ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_hdrlen(fc); + + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast( + priv, &priv->contexts[IWL_RXON_CTX_BSS], + info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop; + } + + IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (unlikely(tid >= MAX_TID_COUNT)) + goto drop; + } + + /* Descriptor for chosen Tx queue */ + txq = &priv->txq[txq_id]; + q = &txq->q; + + if 
((iwl_legacy_queue_space(q) < q->high_mark)) + goto drop; + + spin_lock_irqsave(&priv->lock, flags); + + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* Init first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(*tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + if (info->control.hw_key) + iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + iwl_legacy_update_stats(priv, true, fc, len); + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, + ieee80211_hdrlen(fc)); + + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl3945_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + len = (len + 3) & ~3; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. */ + txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, + len, PCI_DMA_TODEVICE); + /* we do not map meta data ... so we can safely access address to + * provide to unmap command*/ + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, len); + + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, len, 1, 0); + + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). 
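+ *
+ * For illustration (hypothetical sizes): a QoS data frame with a 26-byte
+ * 802.11 header and 1500 payload bytes uses the first TFD entry for the
+ * Tx command plus header, and a second entry mapping the 1500 bytes at
+ * skb->data + hdr_len; a null-function frame gets only the first entry.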
*/ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, len, + 0, U32_PAD(len)); + } + + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if ((iwl_legacy_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + iwl_legacy_stop_queue(priv, txq); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static int iwl3945_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) + add_time = iwl_legacy_usecs_to_beacons(priv, + le64_to_cpu(params->start_time) - priv->_3945.last_tsf, + le16_to_cpu(ctx->timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) + spectrum.start_time = + iwl_legacy_add_beacon_time(priv, + priv->_3945.last_beacon_time, add_time, + le16_to_cpu(ctx->timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_legacy_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (pkt->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", + pkt->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + + return rc; +} + +static void iwl3945_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 
0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + iwl3945_disable_events(priv); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); +#endif + + IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); +} + +static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u8 rate = beacon->beacon_notify_hdr.rate; + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); + +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On"); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + iwl_legacy_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. 
+ */ +static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_legacy_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; + + iwl_legacy_setup_rx_scan_handlers(priv); + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; + + /* Set up hardware specific Rx handlers */ + iwl3945_hw_rx_handler_setup(priv); +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. 
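+ *
+ * To make the index rules above concrete: with READ == 5, a WRITE of 4
+ * means the queue is empty (no good data for the driver yet), while
+ * WRITE == 5 means the queue is full and the firmware must wait for the
+ * driver to process buffers and move WRITE forward.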
+ * + * + * Driver sequence: + * + * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl3945_rx_queue_restock + * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl3945_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl3945_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)dma_addr); +} + +/** + * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +static void iwl3945_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if ((rxq->write_actual != (rxq->write & ~0x7)) + || (abs(rxq->write - rxq->read) > 7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl3945_rx_queue_restock. 
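+ *
+ * (For context on the two callers: iwl3945_rx_replenish() runs from the
+ * workqueue and allocates with GFP_KERNEL, while iwl3945_rx_replenish_now()
+ * runs from the interrupt tasklet and therefore uses GFP_ATOMIC.)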
+ * This is called as a scheduled work item (except for during initialization) + */ +static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + spin_unlock_irqrestore(&rxq->lock, flags); + + rxb->page = page; + /* Get physical address of RB/SKB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +void iwl3945_rx_replenish(void *data) +{ + struct iwl_priv *priv = data; + unsigned long flags; + + iwl3945_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl3945_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl3945_rx_replenish_now(struct iwl_priv *priv) +{ + iwl3945_rx_allocate(priv, GFP_ATOMIC); + + iwl3945_rx_queue_restock(priv); +} + + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int iwl3945_calc_db_from_ratio(int sig_ratio) +{ + /* 1000:1 or higher just report as 60 dB */ + if (sig_ratio >= 1000) + return 60; + + /* 100:1 or higher, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio >= 100) + return 20 + (int)ratio2dB[sig_ratio/10]; + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +/** + * iwl3945_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl3945_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty = 0; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). 
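+ *
+ * For illustration, assuming the usual 256-entry queue: if closed_rb_num
+ * (masked) reads back as 3 while write_actual is 250, total_empty below
+ * works out to 3 - 250 + 256 = 9; only when more than half the queue
+ * (128 entries) is empty does fill_rx force early replenishing.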
*/ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl3945_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, + iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_legacy_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_legacy_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode won't assert. 
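+ *
+ * (fill_rx was set above when more than half the queue needed
+ * restocking; in that case the loop below replenishes immediately and
+ * then again after every 8 handled packets rather than waiting for the
+ * end of the loop.)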
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl3945_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl3945_rx_replenish_now(priv); + else + iwl3945_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl3945_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static const char *iwl3945_desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl3945_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); + return; + } + + + count = iwl_legacy_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + IWL_ERR(priv, "Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = iwl_legacy_read_targ_mem(priv, base + i); + time = + iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32)); + blink1 = + iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32)); + blink2 = + iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32)); + ilink1 = + iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32)); + ilink2 = + iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32)); + data1 = + iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32)); + + IWL_ERR(priv, + "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + iwl3945_desc_lookup(desc), desc, time, blink1, blink2, + ilink1, ilink2, data1); + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0, + 0, blink1, blink2, ilink1, ilink2); + } +} + +#define EVENT_START_OFFSET (6 * sizeof(u32)) + +/** + * iwl3945_print_event_log - Dump error event log to syslog + * + */ +static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). 
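+ * Each entry is read back from SRAM as { event_id, time[, data] }:
+ * two u32s per entry in mode 0 and three in mode 1, so a mode-1 entry
+ * is printed as "timestamp:0xdata:event" below.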
+ * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "0x%08x:%04u\n", + time, ev); + } else { + IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); + trace_iwlwifi_legacy_dev_ucode_event(priv, 0, + time, ev); + } + } else { + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", + time, data, ev); + trace_iwlwifi_legacy_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl3945_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl3945_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl3945_print_event_log(priv, next_entry - size, + size, mode, + pos, buf, bufsz); + } else { + if (next_entry < size) + pos = iwl3945_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + else + pos = iwl3945_print_event_log(priv, next_entry - size, + size, mode, + pos, buf, bufsz); + } + return pos; +} + +#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl3945_hw_valid_rtc_data_addr(base)) { + IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_legacy_read_targ_mem(priv, base); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); + + if (capacity > priv->cfg->base_params->max_event_log_size) { + IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", + capacity, priv->cfg->base_params->max_event_log_size); + capacity = priv->cfg->base_params->max_event_log_size; + } + + if (next_entry > priv->cfg->base_params->max_event_log_size) { + IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", + next_entry, priv->cfg->base_params->max_event_log_size); + next_entry = priv->cfg->base_params->max_event_log_size; + } + + size = num_wraps ? 
capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; +#endif + + IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", + size); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl3945_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + + /* (then/else) start at top of log */ + pos = iwl3945_print_event_log(priv, 0, next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwl3945_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR39_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR39_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_legacy_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_legacy_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + "Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_legacy_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]); + + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl3945_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "Tx interrupt\n"); + priv->isr_stats.tx++; + + iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); + iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT + (FH39_SRVC_CHNL), 0x0); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~priv->inta_mask) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if disabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + struct iwl3945_scan_channel *scan_ch) +{ + const struct ieee80211_supported_band *sband; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added = 0; + u8 channel = 0; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) { + IWL_ERR(priv, "invalid band\n"); + return added; + } + + active_dwell = 
iwl_legacy_get_active_dwell_time(priv, band, 0); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + + channel = iwl_legacy_get_single_channel_number(priv, band); + + if (channel) { + scan_ch->channel = channel; + scan_ch->type = 0; /* passive */ + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* Set txpower levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + added++; + } else + IWL_ERR(priv, "no valid channel found\n"); + return added; +} + +static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl3945_scan_channel *scan_ch, + struct ieee80211_vif *vif) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + scan_ch->channel = chan->hw_value; + + ch_info = iwl_legacy_get_channel_info(priv, band, + scan_ch->channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + scan_ch->channel); + continue; + } + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* If passive , set up for auto-switch + * and use long active_dwell time. + */ + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || + (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { + scan_ch->type = 0; /* passive */ + if (IWL_UCODE_API(priv->ucode_ver) == 1) + scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); + } else { + scan_ch->type = 1; /* active */ + } + + /* Set direct probe bits. These may be used both for active + * scan channels (probes gets sent right away), + * or for passive channels (probes get se sent only after + * hearing clear Rx packet).*/ + if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + if (n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } else { + /* uCode v1 does not allow setting direct probe bits on + * passive channel. */ + if ((scan_ch->type & 1) && n_probes) + scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); + } + + /* Set txpower levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? 
+ active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +static void iwl3945_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl3945_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +/** + * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL39_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. 
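+ *
+ * For illustration, a 40 KB instruction image is spot-checked at only
+ * ~410 sample words (one u32 every 100 bytes) instead of all ~10240
+ * words, and the check gives up after 3 mismatches.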
+ */ +static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL39_RTC_INST_LOWER_BOUND); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return rc; +} + + +/** + * iwl3945_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl3945_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl3945_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl3945_verify_inst_full(priv, image, len); + + return rc; +} + +static void iwl3945_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +#define IWL3945_UCODE_GET(item) \ +static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ +{ \ + return le32_to_cpu(ucode->v1.item); \ +} + +static u32 iwl3945_ucode_get_header_size(u32 api_ver) +{ + return 24; +} + +static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) +{ + return (u8 *) ucode->v1.data; +} + +IWL3945_UCODE_GET(inst_size); +IWL3945_UCODE_GET(data_size); +IWL3945_UCODE_GET(init_size); +IWL3945_UCODE_GET(init_data_size); +IWL3945_UCODE_GET(boot_size); + +/** + * iwl3945_read_ucode - Read uCode images from disk file. 
+ * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl3945_read_ucode(struct iwl_priv *priv) +{ + const struct iwl_ucode_header *ucode; + int ret = -EINVAL, index; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name_pre = priv->cfg->fw_name_pre; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + char buf[25]; + u8 *src; + size_t len; + u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + for (index = api_max; index >= api_min; index--) { + sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); + ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); + if (ret < 0) { + IWL_ERR(priv, "%s firmware file req failed: %d\n", + buf, ret); + if (ret == -ENOENT) + continue; + else + goto error; + } else { + if (index < api_max) + IWL_ERR(priv, "Loaded firmware %s, " + "which is deprecated. " + " Please use API v%u instead.\n", + buf, api_max); + IWL_DEBUG_INFO(priv, "Got firmware '%s' file " + "(%zd bytes) from disk\n", + buf, ucode_raw->size); + break; + } + } + + if (ret < 0) + goto error; + + /* Make sure that we got at least our header! */ + if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { + IWL_ERR(priv, "File size way too small!\n"); + ret = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + inst_size = iwl3945_ucode_get_inst_size(ucode); + data_size = iwl3945_ucode_get_data_size(ucode); + init_size = iwl3945_ucode_get_init_size(ucode); + init_data_size = iwl3945_ucode_get_init_data_size(ucode); + boot_size = iwl3945_ucode_get_boot_size(ucode); + src = iwl3945_ucode_get_data(ucode); + + /* api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward */ + + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + priv->ucode_ver = 0; + ret = -EINVAL; + goto err_release; + } + if (api_ver != api_max) + IWL_ERR(priv, "Firmware has old API version. Expected %u, " + "got %u. 
New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", + boot_size); + + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO(priv, + "uCode file size %zd does not match expected size\n", + ucode_raw->size); + ret = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", + inst_size); + ret = -EINVAL; + goto err_release; + } + + if (data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", + data_size); + ret = -EINVAL; + goto err_release; + } + if (init_size > IWL39_MAX_INST_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init instr len %d too large to fit in\n", + init_size); + ret = -EINVAL; + goto err_release; + } + if (init_data_size > IWL39_MAX_DATA_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode init data len %d too large to fit in\n", + init_data_size); + ret = -EINVAL; + goto err_release; + } + if (boot_size > IWL39_MAX_BSM_SIZE) { + IWL_DEBUG_INFO(priv, + "uCode boot instr len %d too large to fit in\n", + boot_size); + ret = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (init_size && init_data_size) { + priv->ucode_init.len = init_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = init_data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (boot_size) { + priv->ucode_boot.len = boot_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + len = inst_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode instr len %zd\n", len); + memcpy(priv->ucode_code.v_addr, src, len); + src += len; + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl3945_up() */ + len = data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) uCode data len %zd\n", len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + src += len; + + /* Initialization instructions (3rd block) */ + if (init_size) { + len = init_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %zd\n", len); + memcpy(priv->ucode_init.v_addr, src, len); + src += len; + } + + /* Initialization data (4th block) */ + if (init_data_size) { + len = init_data_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %zd\n", len); + memcpy(priv->ucode_init_data.v_addr, src, len); + src += len; + } + + /* Bootstrap instructions (5th block) */ + len = boot_size; + IWL_DEBUG_INFO(priv, + "Copying (but not loading) boot instr len %zd\n", len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + ret = -ENOMEM; + iwl3945_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return ret; +} + + +/** + * iwl3945_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. + * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. 
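+ * The instruction byte count is written last, with BSM_DRAM_INST_LOAD
+ * (bit 31) set to tell the uCode that the new pointer/size values are
+ * in place.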
+ */ +static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + + /* bits 31:0 for 3945 */ + pinst = priv->ucode_code.p_addr; + pdata = priv->ucode_data_backup.p_addr; + + /* Tell bootstrap uCode where to find image to load */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst byte count must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); + + return 0; +} + +/** + * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. + */ +static void iwl3945_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + if (iwl3945_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +/** + * iwl3945_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl3945_init_alive_start()). + */ +static void iwl3945_alive_start(struct iwl_priv *priv) +{ + int thermal_spin = 0; + u32 rfkill; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl3945_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG); + IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); + + if (rfkill & 0x1) { + clear_bit(STATUS_RF_KILL_HW, &priv->status); + /* if RFKILL is not on, then wait for thermal + * sensor in adapter to kick in */ + while (iwl3945_hw_get_temperature(priv) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + /* After the ALIVE response, we can send commands to 3945 uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Enable watchdog to monitor the driver tx queues */ + iwl_legacy_setup_watchdog(priv); + + if (iwl_legacy_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = IWL_RATES_MASK_3945; + + iwl_legacy_power_update_mode(priv, true); + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + struct iwl3945_rxon_cmd *active_rxon = + (struct iwl3945_rxon_cmd *)(&ctx->active); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_legacy_connection_init_rx_config(priv, ctx); + } + + /* Configure Bluetooth device coexistence support */ + iwl_legacy_send_bt_config(priv); + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + iwl3945_commit_rxon(priv, ctx); + + iwl3945_reg_txpower_periodic(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + wake_up_interruptible(&priv->wait_command_queue); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl3945_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending; + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + iwl_legacy_scan_cancel_timeout(priv, 200); + + exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&priv->watchdog); + + /* Station information will now be cleared in device */ + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl3945_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl3945_init() then + * clear all bits but the RF Kill bits and return */ + if (!iwl_legacy_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl3945_hw_txq_ctx_stop(priv); + iwl3945_hw_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Stop the device, and put it in low power state */ + iwl_legacy_apm_stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; + + /* clear out any free frames */ + iwl3945_clear_free_frames(priv); +} + +static void iwl3945_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl3945_down(priv); + mutex_unlock(&priv->mutex); + + iwl3945_cancel_deferred_work(priv); +} + +#define MAX_HW_RESTARTS 5 + +static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + sta_id = iwl_legacy_prep_station(priv, ctx, + iwl_bcast_addr, false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +static int __iwl3945_up(struct iwl_priv *priv) +{ + int rc, i; + + rc = iwl3945_alloc_bcast_station(priv); + if (rc) + return rc; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bring up\n"); + return 
-EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else { + set_bit(STATUS_RF_KILL_HW, &priv->status); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return -ENODEV; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl3945_hw_nic_init(priv); + if (rc) { + IWL_ERR(priv, "Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + /* We return success when we resume from suspend and rf_kill is on. */ + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) + return 0; + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = priv->cfg->ops->lib->load_ucode(priv); + + if (rc) { + IWL_ERR(priv, + "Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl3945_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl3945_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl3945_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl3945_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +/* + * 3945 cannot interrupt driver when hardware rf kill switch toggles; + * driver must poll CSR_GP_CNTRL_REG register for change. This register + * *is* readable even when device has been SW_RESET into low power mode + * (e.g. during RF KILL). 
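+ * The poll work re-queues itself on the driver workqueue roughly every
+ * 2 seconds and is cancelled in iwl3945_mac_start() once the uCode is
+ * running and can report rfkill changes on its own.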
+ */ +static void iwl3945_rfkill_poll(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, _3945.rfkill_poll.work); + bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); + bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) + & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); + + if (new_rfkill != old_rfkill) { + if (new_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); + + IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", + new_rfkill ? "disable radio" : "enable radio"); + } + + /* Keep this running, even if radio now enabled. This will be + * cancelled in mac_start() if system decides to start again */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + +} + +int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl3945_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl3945_scan_cmd *scan; + u8 n_probes = 0; + enum ieee80211_band band; + bool is_active = false; + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + + if (priv->is_internal_short_scan) + interval = 0; + else + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) 
+ * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = 0xFF0FFFFF & + (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); + } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = IWL_RATE_1M_PLCP; + band = IEEE80211_BAND_2GHZ; + break; + case IEEE80211_BAND_5GHZ: + scan->tx_cmd.rate = IWL_RATE_6M_PLCP; + band = IEEE80211_BAND_5GHZ; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scaning is requested but a certain channel + * is marked passive, we can do active scanning if we + * detect transmissions. + */ + scan->good_CRC_th = is_active ? 
IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_DISABLED; + + if (!priv->is_internal_short_scan) { + scan->tx_cmd.len = cpu_to_le16( + iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + vif->addr, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan))); + } else { + /* use bcast addr, will not be transmitted but must be valid */ + scan->tx_cmd.len = cpu_to_le16( + iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + iwl_bcast_addr, NULL, 0, + IWL_MAX_SCAN_SIZE - sizeof(*scan))); + } + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + + if (priv->is_internal_short_scan) { + scan->channel_count = + iwl3945_get_single_channel_for_scan(priv, vif, band, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } else { + scan->channel_count = + iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif); + } + + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl3945_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + clear_bit(STATUS_SCAN_HW, &priv->status); + return ret; +} + +void iwl3945_post_scan(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + /* + * Since setting the RXON may have been deferred while + * performing the scan, fire one off if needed + */ + if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) + iwl3945_commit_rxon(priv, ctx); +} + +static void iwl3945_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + struct iwl_rxon_context *ctx; + mutex_lock(&priv->mutex); + for_each_context(priv, ctx) + ctx->vif = NULL; + priv->is_open = 0; + mutex_unlock(&priv->mutex); + iwl3945_down(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl3945_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl3945_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl3945_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +void iwl3945_post_associate(struct iwl_priv *priv) +{ + int rc = 0; + struct ieee80211_conf *conf = NULL; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (!ctx->vif || !priv->is_open) + return; + + IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", + ctx->vif->bss_conf.aid, ctx->active.bssid_addr); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_legacy_scan_cancel_timeout(priv, 200); + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + + rc = iwl_legacy_send_rxon_timing(priv, ctx); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + + ctx->staging.assoc_id = 
cpu_to_le16(ctx->vif->bss_conf.aid); + + IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", + ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int); + + if (ctx->vif->bss_conf.use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (ctx->vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + } + + iwl3945_commit_rxon(priv, ctx); + + switch (ctx->vif->type) { + case NL80211_IFTYPE_STATION: + iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); + break; + case NL80211_IFTYPE_ADHOC: + iwl3945_send_beacon_cmd(priv); + break; + default: + IWL_ERR(priv, "%s Should not be called in %d mode\n", + __func__, ctx->vif->type); + break; + } +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (2 * HZ) + +static int iwl3945_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + + if (!priv->ucode_code.len) { + ret = iwl3945_read_ucode(priv); + if (ret) { + IWL_ERR(priv, "Could not read microcode: %d\n", ret); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + } + + ret = __iwl3945_up(priv); + + mutex_unlock(&priv->mutex); + + if (ret) + goto out_release_irq; + + IWL_DEBUG_INFO(priv, "Start UP work.\n"); + + /* Wait for START_ALIVE from ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
*/ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, + "Wait for START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + ret = -ETIMEDOUT; + goto out_release_irq; + } + } + + /* ucode is running and will send rfkill notifications, + * no need to poll the killswitch state anymore */ + cancel_delayed_work(&priv->_3945.rfkill_poll); + + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; + +out_release_irq: + priv->is_open = 0; + IWL_DEBUG_MAC80211(priv, "leave - failed\n"); + return ret; +} + +static void iwl3945_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) { + IWL_DEBUG_MAC80211(priv, "leave - skip\n"); + return; + } + + priv->is_open = 0; + + iwl3945_down(priv); + + flush_workqueue(priv->workqueue); + + /* start polling the killswitch state again */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + round_jiffies_relative(2 * HZ)); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl3945_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return NETDEV_TX_OK; +} + +void iwl3945_config_ap(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_vif *vif = ctx->vif; + int rc = 0; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* The following should be done only at AP bring up */ + if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) { + + /* RXON - unassoc (to set timing command) */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + + /* RXON Timing */ + rc = iwl_legacy_send_rxon_timing(priv, ctx); + if (rc) + IWL_WARN(priv, "REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + ctx->staging.assoc_id = 0; + + if (vif->bss_conf.use_short_preamble) + ctx->staging.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { + if (vif->bss_conf.use_short_slot) + ctx->staging.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl3945_commit_rxon(priv, ctx); + } + iwl3945_send_beacon_cmd(priv); +} + +static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + int ret = 0; + u8 sta_id = IWL_INVALID_STATION; + u8 static_key; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl3945_mod_params.sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + /* + * To support IBSS RSN, don't program group keys in IBSS, the + * hardware will then not attempt to decrypt the frames. 
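+ * mac80211 then falls back to software crypto for those group keys
+ * when we return -EOPNOTSUPP below.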
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); + + if (!static_key) { + sta_id = iwl_legacy_sta_id_or_broadcast( + priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + } + + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 100); + + switch (cmd) { + case SET_KEY: + if (static_key) + ret = iwl3945_set_static_key(priv, key); + else + ret = iwl3945_set_dynamic_key(priv, key, sta_id); + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (static_key) + ret = iwl3945_remove_static_key(priv); + else + ret = iwl3945_clear_sta_key_info(priv, sta_id); + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; + int ret; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->common.sta_id = IWL_INVALID_STATION; + + + ret = iwl_legacy_add_station_common(priv, + &priv->contexts[IWL_RXON_CTX_BSS], + sta->addr, is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? */ + mutex_unlock(&priv->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl3945_rs_rate_init(priv, sta, sta_id); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl3945_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but even if hw is ready, committing here breaks for some reason, + * we'll eventually commit the filter flags change anyway. + */ + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_legacy_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. + */ +static ssize_t iwl3945_show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); +} +static ssize_t iwl3945_store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + iwl3945_show_debug_level, iwl3945_store_debug_level); + +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + +static ssize_t iwl3945_show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL); + +static ssize_t iwl3945_show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t iwl3945_store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + IWL_INFO(priv, ": %s is not in decimal form.\n", buf); + else + iwl3945_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power); + +static ssize_t iwl3945_show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + return sprintf(buf, "0x%04X\n", ctx->active.flags); +} + +static ssize_t iwl3945_store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 flags = simple_strtoul(buf, NULL, 0); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + mutex_lock(&priv->mutex); + if (le32_to_cpu(ctx->staging.flags) != flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", + flags); + ctx->staging.flags = cpu_to_le32(flags); + iwl3945_commit_rxon(priv, ctx); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags); + +static ssize_t iwl3945_show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(ctx->active.filter_flags)); +} + +static ssize_t iwl3945_store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { + /* Cancel any currently running scans... */ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) + IWL_WARN(priv, "Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + ctx->staging.filter_flags = + cpu_to_le32(filter_flags); + iwl3945_commit_rxon(priv, ctx); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags, + iwl3945_store_filter_flags); + +static ssize_t iwl3945_show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *)&measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t iwl3945_store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(ctx->active.channel), + .start_time = cpu_to_le64(priv->_3945.last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p && *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl3945_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + iwl3945_show_measurement, iwl3945_store_measurement); + +static ssize_t 
iwl3945_store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t iwl3945_show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate, + iwl3945_store_retry_rate); + + +static ssize_t iwl3945_show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* all this shit doesn't belong into sysfs anyway */ + return 0; +} + +static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL); + +static ssize_t iwl3945_show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); +} + +static ssize_t iwl3945_store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); + int ant; + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); + iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; + } else + IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna); + +static ssize_t iwl3945_show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL); + +static ssize_t iwl3945_dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + char *p = (char *)buf; + + if (p[0] == '1') + iwl3945_dump_nic_error_log(priv); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log); + +/***************************************************************************** + * + * driver setup and tear down + * + *****************************************************************************/ + +static void iwl3945_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl3945_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); + INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); + + iwl_legacy_setup_scan_deferred_work(priv); + + iwl3945_hw_setup_deferred_work(priv); + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_legacy_bg_watchdog; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl3945_irq_tasklet, (unsigned long)priv); +} + +static void 
iwl3945_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl3945_hw_cancel_deferred_work(priv); + + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->alive_start); + + iwl_legacy_cancel_scan_deferred_work(priv); +} + +static struct attribute *iwl3945_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, + &dev_attr_measurement.attr, + &dev_attr_retry_rate.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl3945_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl3945_sysfs_entries, +}; + +struct ieee80211_ops iwl3945_hw_ops = { + .tx = iwl3945_mac_tx, + .start = iwl3945_mac_start, + .stop = iwl3945_mac_stop, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, + .config = iwl_legacy_mac_config, + .configure_filter = iwl3945_configure_filter, + .set_key = iwl3945_mac_set_key, + .conf_tx = iwl_legacy_mac_conf_tx, + .reset_tsf = iwl_legacy_mac_reset_tsf, + .bss_info_changed = iwl_legacy_mac_bss_info_changed, + .hw_scan = iwl_legacy_mac_hw_scan, + .sta_add = iwl3945_mac_sta_add, + .sta_remove = iwl_legacy_mac_sta_remove, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, +}; + +static int iwl3945_init_drv(struct iwl_priv *priv) +{ + int ret; + struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; + + priv->retry_rate = 1; + priv->beacon_skb = NULL; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + + priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; + priv->tx_power_next = IWL_DEFAULT_TX_POWER; + + if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { + IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", + eeprom->version); + ret = -EINVAL; + goto err; + } + ret = iwl_legacy_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + /* Set up txpower settings in driver for all channels */ + if (iwl3945_txpower_set_from_eeprom(priv)) { + ret = -EIO; + goto err_free_channel_map; + } + + ret = iwl_legacy_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl3945_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_legacy_free_channel_map(priv); +err: + return ret; +} + +#define IWL3945_MAX_PROBE_REQUEST 200 + +static int iwl3945_setup_mac(struct iwl_priv *priv) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + + hw->rate_control_algorithm = "iwl-3945-rs"; + hw->sta_data_size = sizeof(struct iwl3945_sta_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_SPECTRUM_MGMT; 
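+	/* IEEE80211_HW_SIGNAL_DBM: signal values we report are in dBm;
+	 * IEEE80211_HW_SPECTRUM_MGMT: advertise 802.11h spectrum
+	 * management (measurement) support to mac80211. */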
+ + hw->wiphy->interface_modes = + priv->contexts[IWL_RXON_CTX_BSS].interface_modes; + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_legacy_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + +static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0, i; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + struct iwl3945_eeprom *eeprom; + unsigned long flags; + + /*********************** + * 1. Allocating HW data + * ********************/ + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = iwl_legacy_alloc_all(cfg); + if (hw == NULL) { + pr_err("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + priv = hw->priv; + SET_IEEE80211_DEV(hw, &pdev->dev); + + priv->cmd_queue = IWL39_CMD_QUEUE_NUM; + + /* 3945 has only one valid context */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + + /* + * Disabling hardware scan means that mac80211 will perform scans + * "the hard way", rather than using device's scan. + */ + if (iwl3945_mod_params.disable_hw_scan) { + dev_printk(KERN_DEBUG, &(pdev->dev), + "sw scan support is deprecated\n"); + iwl3945_hw_ops.hw_scan = NULL; + } + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /*************************** + * 2. 
Initializing PCI bus + * *************************/ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + /*********************** + * 3. Read REV Register + * ********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /*********************** + * 4. Read EEPROM + * ********************/ + + /* Read the EEPROM */ + err = iwl_legacy_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + /* MAC Address location in EEPROM same for 3945/4965 */ + eeprom = (struct iwl3945_eeprom *)priv->eeprom; + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); + SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); + + /*********************** + * 5. Setup HW Constants + * ********************/ + /* Device-specific setup */ + if (iwl3945_hw_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw settings\n"); + goto out_eeprom_free; + } + + /*********************** + * 6. Setup priv + * ********************/ + + err = iwl3945_init_drv(priv); + if (err) { + IWL_ERR(priv, "initializing driver failed\n"); + goto out_unset_hw_params; + } + + IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", + priv->cfg->name); + + /*********************** + * 7. Setup Services + * ********************/ + + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_release_irq; + } + + iwl_legacy_set_rxon_channel(priv, + &priv->bands[IEEE80211_BAND_2GHZ].channels[5], + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl3945_setup_deferred_work(priv); + iwl3945_setup_rx_handlers(priv); + iwl_legacy_power_initialize(priv); + + /********************************* + * 8. 
Setup and Register mac80211 + * *******************************/ + + iwl_legacy_enable_interrupts(priv); + + err = iwl3945_setup_mac(priv); + if (err) + goto out_remove_sysfs; + + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); + + /* Start monitoring the killswitch */ + queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, + 2 * HZ); + + return 0; + + out_remove_sysfs: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + out_release_irq: + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); + out_unset_hw_params: + iwl3945_unset_hw_params(priv); + out_eeprom_free: + iwl_legacy_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_legacy_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_legacy_dbgfs_unregister(priv); + + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_legacy_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl3945_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl_down(), but there are paths to + * run iwl_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + iwl_legacy_apm_stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl3945_synchronize_irq(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); + + cancel_delayed_work_sync(&priv->_3945.rfkill_poll); + + iwl3945_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl3945_rx_queue_free(priv, &priv->rxq); + iwl3945_hw_txq_ctx_free(priv); + + iwl3945_unset_hw_params(priv); + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_legacy_free_traffic_mem(priv); + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl_legacy_free_channel_map(priv); + iwl_legacy_free_geos(priv); + kfree(priv->scan_cmd); + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + ieee80211_free_hw(priv->hw); +} + + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl3945_driver = { + .name = DRV_NAME, + .id_table = iwl3945_hw_card_ids, + .probe = iwl3945_pci_probe, + .remove = __devexit_p(iwl3945_pci_remove), + .driver.pm = IWL_LEGACY_PM_OPS, +}; + +static int __init iwl3945_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = iwl3945_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl3945_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl3945_rate_control_unregister(); + return ret; +} + +static void __exit iwl3945_exit(void) +{ + pci_unregister_driver(&iwl3945_driver); + iwl3945_rate_control_unregister(); +} + +MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); + +module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, + "using software crypto (default 1 [software])"); +module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, + int, S_IRUGO); +MODULE_PARM_DESC(disable_hw_scan, + "disable hardware scanning (default 0) (deprecated)"); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif +module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); + +module_exit(iwl3945_exit); +module_init(iwl3945_init); diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c new file mode 100644 index 000000000000..c0e07685059a --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -0,0 +1,3633 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl4965" + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-sta.h" +#include "iwl-4965-calib.h" +#include "iwl-4965.h" +#include "iwl-4965-led.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +void iwl4965_update_chain_flags(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + if (priv->cfg->ops->hcmd->set_rxon_chain) { + for_each_context(priv, ctx) { + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + if (ctx->active.rx_chain != ctx->staging.rx_chain) + iwl_legacy_commit_rxon(priv, ctx); + } + } +} + +static void iwl4965_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_skb) + return 0; + + if (priv->beacon_skb->len > left) + return 0; + + memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); + + return priv->beacon_skb->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl4965_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
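 *
 * Editor's note (annotation, not part of the original patch): the layout
 * built below is the iwl_tx_beacon_cmd header followed, within the same
 * frame union, by the raw 802.11 beacon copied from priv->beacon_skb; the
 * copy is bounded by sizeof(frame->u) - sizeof(*tx_beacon_cmd) and the
 * function returns sizeof(*tx_beacon_cmd) + frame_size.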
+ */ + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); + return 0; + } + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + if (!frame_size) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx); + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, + rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} + +int iwl4965_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + + frame = iwl4965_get_free_frame(priv); + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = iwl4965_hw_get_beacon_cmd(priv, frame); + if (!frame_size) { + IWL_ERR(priv, "Error configuring the beacon command\n"); + iwl4965_free_frame(priv, frame); + return -EINVAL; + } + + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl4965_free_frame(priv, frame); + + return rc; +} + +static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; + struct iwl_tfd *tfd; + struct pci_dev *dev = priv->pci_dev; + int index = txq->q.read_ptr; + int i; + int 
num_tbs; + + tfd = &tfd_tmp[index]; + + /* Sanity check on number of chunks */ + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) + pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i), + iwl4965_tfd_tb_get_len(tfd, i), + PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct iwl_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl4965_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl4965_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... 
*/ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +/** + * iwl4965_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void iwl4965_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_legacy_is_ready_rf(priv)) + return; + + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false); +} + + +static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base, + u32 start_idx, u32 num_events, + u32 mode) +{ + u32 i; + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (mode == 0) + ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); + else + ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (iwl_grab_nic_access(priv)) { + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return; + } + + /* Set starting address; reads will auto-increment */ + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* + * "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
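 *
 * Editor's note (annotation, not part of the original patch): as used here,
 * the SRAM log begins with a four-u32 header, and each entry is two u32s in
 * mode 0 (event id, data) or three u32s in mode 1 (event id, timestamp,
 * data), which is why ptr is computed above as
 * base + (4 * sizeof(u32)) + (start_idx * entry_size).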
+ */ + for (i = 0; i < num_events; i++) { + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + trace_iwlwifi_legacy_dev_ucode_cont_event(priv, + 0, time, ev); + } else { + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); + trace_iwlwifi_legacy_dev_ucode_cont_event(priv, + time, data, ev); + } + } + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static void iwl4965_continuous_event_trace(struct iwl_priv *priv) +{ + u32 capacity; /* event log capacity in # entries */ + u32 base; /* SRAM byte address of event log header */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + capacity = iwl_legacy_read_targ_mem(priv, base); + num_wraps = iwl_legacy_read_targ_mem(priv, + base + (2 * sizeof(u32))); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, + base + (3 * sizeof(u32))); + } else + return; + + if (num_wraps == priv->event_log.num_wraps) { + iwl4965_print_cont_event_trace(priv, + base, priv->event_log.next_entry, + next_entry - priv->event_log.next_entry, + mode); + priv->event_log.non_wraps_count++; + } else { + if ((num_wraps - priv->event_log.num_wraps) > 1) + priv->event_log.wraps_more_count++; + else + priv->event_log.wraps_once_count++; + trace_iwlwifi_legacy_dev_ucode_wrap_event(priv, + num_wraps - priv->event_log.num_wraps, + next_entry, priv->event_log.next_entry); + if (next_entry < priv->event_log.next_entry) { + iwl4965_print_cont_event_trace(priv, base, + priv->event_log.next_entry, + capacity - priv->event_log.next_entry, + mode); + + iwl4965_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } else { + iwl4965_print_cont_event_trace(priv, base, + next_entry, capacity - next_entry, + mode); + + iwl4965_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } + } + priv->event_log.num_wraps = num_wraps; + priv->event_log.next_entry = next_entry; +} + +/** + * iwl4965_bg_ucode_trace - Timer callback to log ucode event + * + * The timer is continually set to execute every + * UCODE_TRACE_PERIOD milliseconds after the last timer expired + * this function is to perform continuous uCode event logging operation + * if enabled + */ +static void iwl4965_bg_ucode_trace(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->event_log.ucode_trace) { + iwl4965_continuous_event_trace(priv); + /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } +} + +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = + (struct iwl4965_beacon_notif *)pkt->u.raw; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + 
le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv) +{ + unsigned long flags; + + IWL_DEBUG_POWER(priv, "Stop all queues\n"); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + + spin_lock_irqsave(&priv->reg_lock, flags); + if (!iwl_grab_nic_access(priv)) + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? + "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + } + + if (flags & CT_CARD_DISABLED) + iwl4965_perform_ct_kill_task(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + if (!(flags & RXON_CARD_DISABLED)) + iwl_legacy_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_legacy_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
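 *
 * Editor's note (annotation, not part of the original patch): rx_handlers is
 * a table indexed by the command/notification id carried in pkt->hdr.cmd,
 * each entry being a void (*)(struct iwl_priv *, struct iwl_rx_mem_buffer *);
 * ids left NULL fall through to the "No handling needed" branch of
 * iwl4965_rx_handle() below.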
+ */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics; + + iwl_legacy_setup_rx_scan_handlers(priv); + + /* status change handler */ + priv->rx_handlers[CARD_STATE_NOTIFICATION] = + iwl4965_rx_card_state_notif; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl4965_rx_missed_beacon_notif; + /* Rx handlers */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; + /* block ack */ + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} + +/** + * iwl4965_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void iwl4965_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
See iwl4965_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_legacy_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_legacy_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl4965_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl4965_rx_replenish_now(priv); + else + iwl4965_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl4965_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl4965_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. 
*/ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_legacy_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_legacy_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_legacy_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. 
+ */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]); + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl4965_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, + "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t iwl4965_show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); +} +static ssize_t iwl4965_store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + iwl4965_show_debug_level, iwl4965_store_debug_level); + + +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + + +static ssize_t iwl4965_show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL); + +static ssize_t iwl4965_show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_ready_rf(priv)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t iwl4965_store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "%s is not in decimal form.\n", buf); + else { + ret = iwl_legacy_set_tx_power(priv, val, false); + if (ret) + IWL_ERR(priv, "failed setting tx power (0x%d).\n", + ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, + iwl4965_show_tx_power, iwl4965_store_tx_power); + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +static void iwl4965_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +static void iwl4965_ucode_callback(const struct firmware *ucode_raw, + void *context); +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length); + +static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first) +{ + const char *name_pre = priv->cfg->fw_name_pre; + char 
tag[8]; + + if (first) { + priv->fw_index = priv->cfg->ucode_api_max; + sprintf(tag, "%d", priv->fw_index); + } else { + priv->fw_index--; + sprintf(tag, "%d", priv->fw_index); + } + + if (priv->fw_index < priv->cfg->ucode_api_min) { + IWL_ERR(priv, "no suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); + + IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", + priv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, + &priv->pci_dev->dev, GFP_KERNEL, priv, + iwl4965_ucode_callback); +} + +struct iwl4965_firmware_pieces { + const void *inst, *data, *init, *init_data, *boot; + size_t inst_size, data_size, init_size, init_data_size, boot_size; +}; + +static int iwl4965_load_firmware(struct iwl_priv *priv, + const struct firmware *ucode_raw, + struct iwl4965_firmware_pieces *pieces) +{ + struct iwl_ucode_header *ucode = (void *)ucode_raw->data; + u32 api_ver, hdr_size; + const u8 *src; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + + switch (api_ver) { + default: + case 0: + case 1: + case 2: + hdr_size = 24; + if (ucode_raw->size < hdr_size) { + IWL_ERR(priv, "File size too small!\n"); + return -EINVAL; + } + pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); + pieces->data_size = le32_to_cpu(ucode->v1.data_size); + pieces->init_size = le32_to_cpu(ucode->v1.init_size); + pieces->init_data_size = + le32_to_cpu(ucode->v1.init_data_size); + pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); + src = ucode->v1.data; + break; + } + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != hdr_size + pieces->inst_size + + pieces->data_size + pieces->init_size + + pieces->init_data_size + pieces->boot_size) { + + IWL_ERR(priv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + return -EINVAL; + } + + pieces->inst = src; + src += pieces->inst_size; + pieces->data = src; + src += pieces->data_size; + pieces->init = src; + src += pieces->init_size; + pieces->init_data = src; + src += pieces->init_data_size; + pieces->boot = src; + src += pieces->boot_size; + + return 0; +} + +/** + * iwl4965_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
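 *
 * Editor's note (annotation, not part of the original patch): on failure this
 * callback retries via iwl4965_request_firmware(), which builds the file name
 * as "<fw_name_pre><api>.ucode" and steps the API index down from
 * ucode_api_max toward ucode_api_min (for the 4965 that would be a name along
 * the lines of "iwlwifi-4965-2.ucode"; the exact name is assumed here for
 * illustration), so an older firmware image can still be loaded.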
+ */ +static void +iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_priv *priv = context; + struct iwl_ucode_header *ucode; + int err; + struct iwl4965_firmware_pieces pieces; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + u32 api_ver; + + u32 max_probe_length = 200; + u32 standard_phy_calibration_size = + IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + memset(&pieces, 0, sizeof(pieces)); + + if (!ucode_raw) { + if (priv->fw_index <= priv->cfg->ucode_api_max) + IWL_ERR(priv, + "request for firmware file '%s' failed.\n", + priv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", + priv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the API version number */ + if (ucode_raw->size < 4) { + IWL_ERR(priv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + err = iwl4965_load_firmware(priv, ucode_raw, &pieces); + + if (err) + goto try_again; + + api_ver = IWL_UCODE_API(priv->ucode_ver); + + /* + * api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward + */ + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, + "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver != api_max) + IWL_ERR(priv, + "Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. 
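 *
 * Editor's note (annotation, not part of the original patch): per
 * iwl4965_load_firmware() above, the legacy (API v0-v2) file format is a
 * 24-byte header (the ucode version word plus the five section sizes)
 * followed by the runtime instruction, runtime data, init instruction,
 * init data and bootstrap images concatenated in that order; the checks
 * below verify each piece against the card's SRAM limits before copying.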
+ */ + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", + pieces.inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", + pieces.data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", + pieces.init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", + pieces.init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", + pieces.boot_size); + + /* Verify that uCode images will fit in card's SRAM */ + if (pieces.inst_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", + pieces.inst_size); + goto try_again; + } + + if (pieces.data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", + pieces.data_size); + goto try_again; + } + + if (pieces.init_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", + pieces.init_size); + goto try_again; + } + + if (pieces.init_data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", + pieces.init_data_size); + goto try_again; + } + + if (pieces.boot_size > priv->hw_params.max_bsm_size) { + IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", + pieces.boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = pieces.inst_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (pieces.init_size && pieces.init_data_size) { + priv->ucode_init.len = pieces.init_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = pieces.init_data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (pieces.boot_size) { + priv->ucode_boot.len = pieces.boot_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Now that we can no longer fail, copy information */ + + priv->sta_key_max_num = STA_KEY_MAX_NUM; + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", + pieces.inst_size); + memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* + * Runtime data + * NOTE: Copy into backup buffer will be done in iwl_up() + */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", + pieces.data_size); + memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); + memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); + + /* Initialization instructions */ + if (pieces.init_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %Zd\n", + pieces.init_size); + memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); + } + + /* Initialization data */ + if (pieces.init_data_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %Zd\n", + pieces.init_data_size); + memcpy(priv->ucode_init_data.v_addr, pieces.init_data, + pieces.init_data_size); + } + + /* Bootstrap instructions */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", + pieces.boot_size); + memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); + + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + priv->_4965.phy_calib_chain_noise_reset_cmd = + standard_phy_calibration_size; + priv->_4965.phy_calib_chain_noise_gain_cmd = + standard_phy_calibration_size + 1; + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = iwl4965_mac_setup_register(priv, max_probe_length); + if (err) + goto out_unbind; + + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, + "failed to create debugfs files. 
Ignoring error: %d\n", err); + + err = sysfs_create_group(&priv->pci_dev->dev.kobj, + &iwl_attribute_group); + if (err) { + IWL_ERR(priv, "failed to create sysfs device attributes\n"); + goto out_unbind; + } + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + complete(&priv->_4965.firmware_loading_complete); + return; + + try_again: + /* try next, if any */ + if (iwl4965_request_firmware(priv, false)) + goto out_unbind; + release_firmware(ucode_raw); + return; + + err_pci_alloc: + IWL_ERR(priv, "failed to allocate pci memory\n"); + iwl4965_dealloc_ucode_pci(priv); + out_unbind: + complete(&priv->_4965.firmware_loading_complete); + device_release_driver(&priv->pci_dev->dev); + release_firmware(ucode_raw); +} + +static const char * const desc_lookup_text[] = { + "OK", + "FAIL", + "BAD_PARAM", + "BAD_CHECKSUM", + "NMI_INTERRUPT_WDG", + "SYSASSERT", + "FATAL_ERROR", + "BAD_COMMAND", + "HW_ERROR_TUNE_LOCK", + "HW_ERROR_TEMPERATURE", + "ILLEGAL_CHAN_FREQ", + "VCC_NOT_STABLE", + "FH_ERROR", + "NMI_INTERRUPT_HOST", + "NMI_INTERRUPT_ACTION_PT", + "NMI_INTERRUPT_UNKNOWN", + "UCODE_VERSION_MISMATCH", + "HW_ERROR_ABS_LOCK", + "HW_ERROR_CAL_LOCK_FAIL", + "NMI_INTERRUPT_INST_ACTION_PT", + "NMI_INTERRUPT_DATA_ACTION_PT", + "NMI_TRM_HW_ER", + "NMI_INTERRUPT_TRM", + "NMI_INTERRUPT_BREAK_POINT", + "DEBUG_0", + "DEBUG_1", + "DEBUG_2", + "DEBUG_3", +}; + +static struct { char *name; u8 num; } advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +static const char *iwl4965_desc_lookup(u32 num) +{ + int i; + int max = ARRAY_SIZE(desc_lookup_text); + + if (num < max) + return desc_lookup_text[num]; + + max = ARRAY_SIZE(advanced_lookup) - 1; + for (i = 0; i < max; i++) { + if (advanced_lookup[i].num == num) + break; + } + return advanced_lookup[i].name; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +void iwl4965_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + u32 pc, hcmd; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + } + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Not valid error log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? 
"Init" : "RT"); + return; + } + + count = iwl_legacy_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32)); + priv->isr_stats.err_code = desc; + pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32)); + blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32)); + blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32)); + line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32)); + time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32)); + hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32)); + + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, + time, data1, data2, line, + blink1, blink2, ilink1, ilink2); + + IWL_ERR(priv, "Desc Time " + "data1 data2 line\n"); + IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", + iwl4965_desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); + IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", + pc, blink1, blink2, ilink1, ilink2, hcmd); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl4965_print_event_log - Dump error event log to syslog + * + */ +static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + } + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
*/ + for (i = 0; i < num_events; i++) { + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_legacy_dev_ucode_event(priv, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_legacy_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl4965_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl4965_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl4965_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl4965_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl4965_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + } + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_legacy_read_targ_mem(priv, base); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? 
DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#endif + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl4965_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl4965_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + + cmd.critical_temperature_R = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature is %d\n", + priv->hw_params.ct_kill_threshold); +} + +static const s8 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWL49_CMD_FIFO_NUM, + IWL_TX_FIFO_UNUSED, + IWL_TX_FIFO_UNUSED, +}; + +static int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + /* Clear 4965's internal Tx Scheduler data base */ + priv->scd_base_addr = iwl_legacy_read_prph(priv, + IWL49_SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + + /* Tel 4965 where to find Tx byte count tables */ + iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Disable chain mode for all queues */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); + + /* Initialize each Tx queue (including the command queue) */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + + /* TFD circular buffer read/write indexes */ + 
iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
+		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+		/* Max Tx Window size for Scheduler-ACK mode */
+		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+				(SCD_WIN_SIZE <<
+				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+		/* Frame limit */
+		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
+				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+				sizeof(u32),
+				(SCD_FRAME_LIMIT <<
+				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	}
+	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
+			(1 << priv->hw_params.max_txq_num) - 1);
+
+	/* Activate all Tx DMA/FIFO channels */
+	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
+
+	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+	/* make sure all queues are not stopped */
+	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+	for (i = 0; i < 4; i++)
+		atomic_set(&priv->queue_stop_count[i], 0);
+
+	/* reset to 0 to enable all the queues first */
+	priv->txq_ctx_active_msk = 0;
+	/* Map each Tx/cmd queue to its corresponding fifo */
+	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+		int ac = default_queue_to_tx_fifo[i];
+
+		iwl_txq_ctx_activate(priv, i);
+
+		if (ac == IWL_TX_FIFO_UNUSED)
+			continue;
+
+		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
+/**
+ * iwl4965_alive_start - called after REPLY_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by iwl_init_alive_start()).
+ */
+static void iwl4965_alive_start(struct iwl_priv *priv)
+{
+	int ret = 0;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		IWL_DEBUG_INFO(priv, "Alive failed.\n");
+		goto restart;
+	}
+
+	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "runtime" alive if code weren't properly loaded.
*/ + if (iwl4965_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + ret = iwl4965_alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Enable watchdog to monitor the driver tx queues */ + iwl_legacy_setup_watchdog(priv); + + if (iwl_legacy_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = IWL_RATES_MASK; + + if (iwl_legacy_is_associated_ctx(ctx)) { + struct iwl_legacy_rxon_cmd *active_rxon = + (struct iwl_legacy_rxon_cmd *)&ctx->active; + /* apply any changes in staging */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + struct iwl_rxon_context *tmp; + /* Initialize our rx_config data */ + for_each_context(priv, tmp) + iwl_legacy_connection_init_rx_config(priv, tmp); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* Configure bluetooth coexistence if enabled */ + iwl_legacy_send_bt_config(priv); + + iwl4965_reset_run_time_calib(priv); + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + iwl_legacy_commit_rxon(priv, ctx); + + /* At this point, the NIC is initialized and operational */ + iwl4965_rf_kill_ct_config(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + wake_up_interruptible(&priv->wait_command_queue); + + iwl_legacy_power_update_mode(priv, true); + IWL_DEBUG_INFO(priv, "Updated power mode\n"); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl4965_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + iwl_legacy_scan_cancel_timeout(priv, 200); + + exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&priv->watchdog); + + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl4965_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill bit and return */ + if (!iwl_legacy_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl4965_txq_ctx_stop(priv); + iwl4965_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_legacy_apm_stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; + + /* clear out any free frames */ + iwl4965_clear_free_frames(priv); +} + +static void iwl4965_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl4965_down(priv); + mutex_unlock(&priv->mutex); + + iwl4965_cancel_deferred_work(priv); +} + +#define HW_READY_TIMEOUT (50) + +static int iwl4965_set_hw_ready(struct iwl_priv *priv) +{ + int ret = 0; + + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + priv->hw_ready = true; + else + priv->hw_ready = false; + + IWL_DEBUG_INFO(priv, "hardware %s\n", + (priv->hw_ready == 1) ? 
"ready" : "not ready"); + return ret; +} + +static int iwl4965_prepare_card_hw(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n"); + + ret = iwl4965_set_hw_ready(priv); + if (priv->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. */ + if (ret != -ETIMEDOUT) + iwl4965_set_hw_ready(priv); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl4965_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int i; + int ret; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bringup\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwl4965_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_legacy_dealloc_bcast_stations(priv); + return ret; + } + } + + iwl4965_prepare_card_hw(priv); + + if (!priv->hw_ready) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, + CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_legacy_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + + iwl_legacy_enable_interrupts(priv); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + /* must be initialised before iwl_hw_nic_init */ + priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + + ret = iwl4965_hw_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = priv->cfg->ops->lib->load_ucode(priv); + + if (ret) { + IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", + ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl4965_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl4965_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl4965_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + priv->cfg->ops->lib->init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl4965_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl4965_chain_noise_calibration(priv, + (void *)&priv->_4965.statistics); + iwl4965_sensitivity_calibration(priv, + (void *)&priv->_4965.statistics); + } + + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + struct iwl_rxon_context *ctx; + + mutex_lock(&priv->mutex); + for_each_context(priv, ctx) + ctx->vif = NULL; + priv->is_open = 0; + + __iwl4965_down(priv); + + mutex_unlock(&priv->mutex); + iwl4965_cancel_deferred_work(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl4965_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl4965_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl4965_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a 
mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; + + hw->rate_control_algorithm = "iwl-4965-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + if (priv->cfg->sku & IWL_SKU_N) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_legacy_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + + +int iwl4965_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + + if (ret) + return ret; + + if (iwl_legacy_is_rfkill(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
*/ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + iwl4965_led_enable(priv); + +out: + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +void iwl4965_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + iwl4965_down(priv); + + flush_workqueue(priv->workqueue); + + /* enable interrupts again in order to receive rfkill changes */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl4965_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); + return NETDEV_TX_OK; +} + +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, + iv32, phase1key); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->cfg->mod_params->sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 100); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. 
+ */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = iwl4965_set_default_wep_key(priv, + vif_priv->ctx, key); + else + ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl4965_remove_default_wep_key(priv, ctx, key); + else + ret = iwl4965_remove_dynamic_key(priv, ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct iwl_priv *priv = hw->priv; + int ret = -EINVAL; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & IWL_SKU_N)) + return -EACCES; + + mutex_lock(&priv->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl4965_sta_rx_agg_stop(priv, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn); + if (ret == 0) { + priv->_4965.agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", + priv->_4965.agg_tids_count); + } + break; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl4965_tx_agg_stop(priv, vif, sta, tid); + if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) { + priv->_4965.agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", + priv->_4965.agg_tids_count); + } + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = 0; + break; + } + mutex_unlock(&priv->mutex); + + return ret; +} + +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->common.sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + + ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? 
*/ + mutex_unlock(&priv->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl4965_rs_rate_init(priv, sta, sta_id); + mutex_unlock(&priv->mutex); + + return 0; +} + +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + unsigned long flags = 0; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl_legacy_is_rfkill(priv)) + goto out_exit; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + goto out_exit; + + if (!iwl_legacy_is_associated_ctx(ctx)) + goto out_exit; + + /* channel switch in progress */ + if (priv->switch_rxon.switch_in_progress == true) + goto out_exit; + + mutex_lock(&priv->mutex); + if (priv->cfg->ops->lib->set_channel_switch) { + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) != ch) { + ch_info = iwl_legacy_get_channel_info(priv, + channel->band, + ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + spin_lock_irqsave(&priv->lock, flags); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_legacy_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + if (priv->cfg->ops->lib->set_channel_switch(priv, + ch_switch)) + priv->switch_rxon.switch_in_progress = false; + } + } +out: + mutex_unlock(&priv->mutex); +out_exit: + if (!priv->switch_rxon.switch_in_progress) + ieee80211_chswitch_done(ctx->vif, false); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + 
mutex_lock(&priv->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_legacy_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + /* Regardless of if we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + priv->cfg->ops->lib->send_tx_power(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; + + mutex_unlock(&priv->mutex); +} + +static void iwl4965_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl4965_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); + INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); + + iwl_legacy_setup_scan_deferred_work(priv); + + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; + + init_timer(&priv->ucode_trace); + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl4965_bg_ucode_trace; + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_legacy_bg_watchdog; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl4965_irq_tasklet, (unsigned long)priv); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->txpower_work); + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->run_time_calib_work); + + iwl_legacy_cancel_scan_deferred_work(priv); + + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static void iwl4965_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwl_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on indexes 
*/ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + + /* Find out whether to activate Tx queue */ + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; + + /* Set up and activate */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + IWL49_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + + +static int iwl4965_init_drv(struct iwl_priv *priv) +{ + int ret; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + priv->_4965.agg_tids_count = 0; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + + iwl_legacy_init_scan_params(priv); + + /* Set the tx_power_user_lmt to the lowest power level + * this value will get overwritten by channel max power avg + * from eeprom */ + priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN; + priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN; + + ret = iwl_legacy_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwl_legacy_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl4965_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_legacy_free_channel_map(priv); +err: + return ret; +} + +static void iwl4965_uninit_drv(struct iwl_priv *priv) +{ + iwl4965_calib_free_results(priv); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); + kfree(priv->scan_cmd); +} + +static void iwl4965_hw_detect(struct iwl_priv *priv) +{ + priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); + priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); + pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); +} + +static int 
iwl4965_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + if (priv->cfg->mod_params->amsdu_size_8K) + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); + else + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); + + priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; + + if (priv->cfg->mod_params->disable_11n) + priv->cfg->sku &= ~IWL_SKU_N; + + /* Device-specific setup */ + return priv->cfg->ops->lib->set_hw_params(priv); +} + +static const u8 iwl4965_bss_ac_to_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwl4965_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static int +iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0, i; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. Allocating HW data + ************************/ + + hw = iwl_legacy_alloc_all(cfg); + if (!hw) { + err = -ENOMEM; + goto out; + } + priv = hw->priv; + /* At this point both hw and priv are allocated. */ + + /* + * The default context is always valid, + * more may be discovered when firmware + * is loaded. + */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].always_active = true; + priv->contexts[IWL_RXON_CTX_BSS].is_active = true; + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue; + priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = + BIT(NL80211_IFTYPE_ADHOC); + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION); + priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1); + + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /************************** + * 2. 
Initializing PCI bus + **************************/ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, priv); + + + /*********************** + * 3. Read REV register + ***********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + iwl4965_hw_detect(priv); + IWL_INFO(priv, "Detected %s, REV=0x%X\n", + priv->cfg->name, priv->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl4965_prepare_card_hw(priv); + if (!priv->hw_ready) { + IWL_WARN(priv, "Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. Read EEPROM + *****************/ + /* Read the EEPROM */ + err = iwl_legacy_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + err = iwl4965_eeprom_check_version(priv); + if (err) + goto out_free_eeprom; + + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); + priv->hw->wiphy->addresses = priv->addresses; + priv->hw->wiphy->n_addresses = 1; + + /************************ + * 5. Setup HW constants + ************************/ + if (iwl4965_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup priv + *******************/ + + err = iwl4965_init_drv(priv); + if (err) + goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ + + /******************** + * 7. Setup services + ********************/ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + iwl4965_setup_deferred_work(priv); + iwl4965_setup_rx_handlers(priv); + + /********************************************* + * 8. 
Enable interrupts and read RFKILL state + *********************************************/ + + /* enable interrupts if needed: hw bug w/a */ + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); + } + + iwl_legacy_enable_interrupts(priv); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + + iwl_legacy_power_initialize(priv); + + init_completion(&priv->_4965.firmware_loading_complete); + + err = iwl4965_request_firmware(priv, true); + if (err) + goto out_destroy_workqueue; + + return 0; + + out_destroy_workqueue: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl4965_uninit_drv(priv); + out_free_eeprom: + iwl_legacy_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_legacy_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + wait_for_completion(&priv->_4965.firmware_loading_complete); + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_legacy_dbgfs_unregister(priv); + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + /* ieee80211_unregister_hw call wil cause iwl_mac_stop to + * to be called and iwl4965_down since we are removing the device + * we need to set STATUS_EXIT_PENDING bit. + */ + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_legacy_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl4965_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl4965_down(), but there are paths to + * run iwl4965_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl4965_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + iwl_legacy_apm_stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_synchronize_irq(priv); + + iwl4965_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl4965_rx_queue_free(priv, &priv->rxq); + iwl4965_hw_txq_ctx_free(priv); + + iwl_legacy_eeprom_free(priv); + + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_legacy_free_traffic_mem(priv); + + free_irq(priv->pci_dev->irq, priv); + pci_disable_msi(priv->pci_dev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl4965_uninit_drv(priv); + + dev_kfree_skb(priv->beacon_skb); + + ieee80211_free_hw(priv->hw); +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under priv->lock and mac access + */ +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask); +} + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = { +#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965) + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)}, +#endif /* CONFIG_IWL4965 */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids); + +static struct pci_driver iwl4965_driver = { + .name = DRV_NAME, + .id_table = iwl4965_hw_card_ids, + .probe = iwl4965_pci_probe, + .remove = __devexit_p(iwl4965_pci_remove), + .driver.pm = IWL_LEGACY_PM_OPS, +}; + +static int __init iwl4965_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = iwl4965_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl4965_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl4965_rate_control_unregister(); + return ret; +} + +static void __exit iwl4965_exit(void) +{ + pci_unregister_driver(&iwl4965_driver); + iwl4965_rate_control_unregister(); +} + +module_exit(iwl4965_exit); +module_init(iwl4965_init); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num, "number of hw queues."); +module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); |
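
The newest-entries path above (iwl4965_print_last_event_logs() feeding iwl4965_dump_nic_event_log()) is the trickiest index arithmetic in this file: the event log is a circular buffer of capacity entries, next_entry is the slot uCode will write next, and the newest size entries may have to be printed as two ranges when they straddle the wrap point. The standalone C sketch below reproduces only that selection logic; pick_ranges() and the sample numbers are illustrative and are not part of the driver.

#include <stdio.h>

/*
 * Sketch of the entry-selection math in iwl4965_print_last_event_logs():
 * 'capacity' is the size of the circular event log, 'next_entry' is where
 * uCode will write next, 'num_wraps' says whether the log has wrapped, and
 * 'size' is how many of the newest entries we want to show.
 */
static void pick_ranges(unsigned int capacity, unsigned int num_wraps,
			unsigned int next_entry, unsigned int size)
{
	if (num_wraps && next_entry < size) {
		/* newest entries straddle the wrap point: tail, then head */
		printf("range 1: [%u, %u)\n",
		       capacity - (size - next_entry), capacity);
		printf("range 2: [0, %u)\n", next_entry);
	} else if (next_entry < size) {
		/* log never wrapped and holds fewer than 'size' entries */
		printf("range 1: [0, %u)\n", next_entry);
	} else {
		/* newest entries are contiguous, ending at next_entry */
		printf("range 1: [%u, %u)\n", next_entry - size, next_entry);
	}
}

int main(void)
{
	/* capacity 256, wrapped, next write at index 5, show newest 20 */
	pick_ranges(256, 1, 5, 20);	/* prints [241, 256) then [0, 5) */
	return 0;
}

The driver additionally clamps size to DEFAULT_DUMP_EVENT_LOG_ENTRIES unless a full dump was requested, which is why the printed ranges are normally short.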
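
A second reading aid, this time for __iwl4965_down(): the shift-and-OR expression that trims priv->status keeps only the RF-kill, geo-configured, FW-error and exit-pending bits, and is equivalent to masking with the set of bits to preserve. The sketch below demonstrates that equivalence; the bit positions are made up for the demo and do not match the driver's STATUS_* values.

#include <stdio.h>

/* illustrative bit positions only; the driver's STATUS_* values differ */
enum { S_RF_KILL_HW, S_GEO_CONFIGURED, S_FW_ERROR, S_EXIT_PENDING, S_ALIVE };

int main(void)
{
	unsigned long status = (1UL << S_ALIVE) | (1UL << S_FW_ERROR) |
			       (1UL << S_GEO_CONFIGURED);

	/* the shift-and-OR form used in __iwl4965_down()... */
	unsigned long a = status &
		((!!(status & (1UL << S_RF_KILL_HW)) << S_RF_KILL_HW) |
		 (!!(status & (1UL << S_GEO_CONFIGURED)) << S_GEO_CONFIGURED) |
		 (!!(status & (1UL << S_FW_ERROR)) << S_FW_ERROR) |
		 (!!(status & (1UL << S_EXIT_PENDING)) << S_EXIT_PENDING));

	/* ...collapses to a plain mask of the bits to keep */
	unsigned long keep = (1UL << S_RF_KILL_HW) | (1UL << S_GEO_CONFIGURED) |
			     (1UL << S_FW_ERROR) | (1UL << S_EXIT_PENDING);
	unsigned long b = status & keep;

	printf("0x%lx 0x%lx\n", a, b);	/* both print 0x6 */
	return 0;
}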