Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig             |    9
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/agn.h           |    4
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/commands.h      |    7
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/debugfs.c       |    2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/dev.h           |    1
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/lib.c           |   37
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c      |   16
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c          |   29
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rx.c            |    6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/tx.c            |    9
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/ucode.c         |    2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h        |    2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h      |   95
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c  |    4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h            |    2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.c            |    4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h            |    2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h          |    3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h         |   35
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c          |    1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h     |  117
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c           |  399
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c        | 1046
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c           | 1233
24 files changed, 1591 insertions, 1474 deletions
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 727fbb5db9da..5cf43236421e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -133,12 +133,3 @@ config IWLWIFI_P2P
support when it is loaded.
Say Y only if you want to experiment with P2P.
-
-config IWLWIFI_EXPERIMENTAL_MFP
- bool "support MFP (802.11w) even if uCode doesn't advertise"
- depends on IWLWIFI
- help
- This option enables experimental MFP (802.11W) support
- even if the microcode doesn't advertise it.
-
- Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 75e12f29d9eb..33b3ad2e546b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -176,8 +176,8 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
+int iwlagn_txfifo_flush(struct iwl_priv *priv);
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 01128c96b5d8..71ab76b2b39d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -986,8 +986,7 @@ struct iwl_rem_sta_cmd {
#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
+#define IWL_DROP_ALL BIT(1)
/*
* REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1004,14 +1003,14 @@ struct iwl_rem_sta_cmd {
* the flush operation ends when both the scheduler DMA done and TXFIFO empty
* are set.
*
- * @fifo_control: bit mask for which queues to flush
+ * @queue_control: bit mask for which queues to flush
* @flush_control: flush controls
* 0: Dump single MSDU
* 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
* 2: Dump all FIFO
*/
struct iwl_txfifo_flush_cmd {
- __le32 fifo_control;
+ __le32 queue_control;
__le16 flush_control;
__le16 reserved;
} __packed;
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 1a98fa3ab06d..769a08bca86f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -2101,7 +2101,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (iwl_is_rfkill(priv))
return -EFAULT;
- iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+ iwlagn_dev_txfifo_flush(priv);
return count;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 8141f91c3725..29c571a56251 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -789,7 +789,6 @@ struct iwl_priv {
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
struct delayed_work hw_roc_disable_work;
- enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
bool hw_roc_setup, hw_roc_start_notified;
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index bef88c1a2c9b..7e59be4b89b8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -136,7 +136,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
* 1. acquire mutex before calling
* 2. make sure rf is on and not in exit state
*/
-int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+int iwlagn_txfifo_flush(struct iwl_priv *priv)
{
struct iwl_txfifo_flush_cmd flush_cmd;
struct iwl_host_cmd cmd = {
@@ -146,35 +146,34 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
.data = { &flush_cmd, },
};
- might_sleep();
-
memset(&flush_cmd, 0, sizeof(flush_cmd));
- if (flush_control & BIT(IWL_RXON_CTX_BSS))
- flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
- IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
- IWL_SCD_MGMT_MSK;
- if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
- (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
- flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
- IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
- IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
- IWL_PAN_SCD_MULTICAST_MSK;
+
+ flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+ IWL_SCD_MGMT_MSK;
+ if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
+ IWL_PAN_SCD_VI_MSK |
+ IWL_PAN_SCD_BE_MSK |
+ IWL_PAN_SCD_BK_MSK |
+ IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
- flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
+ flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
- IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
- flush_cmd.fifo_control);
- flush_cmd.flush_control = cpu_to_le16(flush_control);
+ IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
+ flush_cmd.queue_control);
+ flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
return iwl_dvm_send_cmd(priv, &cmd);
}
-void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
{
mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2d9eee93c743..fb959b00b208 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -168,10 +168,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP
- /* enable 11w if the uCode advertise */
- if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */
+ /*
+ * Enable 11w if advertised by firmware and software crypto
+ * is not enabled (as the firmware will interpret some mgmt
+ * packets, so enabling it with software crypto isn't safe)
+ */
+ if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+ !iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -1019,7 +1022,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
*/
if (drop) {
IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -1032,8 +1035,8 @@ done:
}
static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type,
int duration)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1065,7 +1068,6 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
}
priv->hw_roc_channel = channel;
- priv->hw_roc_chantype = channel_type;
/* convert from ms to TU */
priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
priv->hw_roc_start_notified = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 408132cf83c1..e3a07c916812 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -511,7 +511,7 @@ static void iwl_bg_tx_flush(struct work_struct *work)
return;
IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
- iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
+ iwlagn_dev_txfifo_flush(priv);
}
/*
@@ -1191,10 +1191,6 @@ static void iwl_option_config(struct iwl_priv *priv)
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
{
- u16 radio_cfg;
-
- priv->eeprom_data->sku = priv->eeprom_data->sku;
-
if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
!priv->cfg->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
@@ -1206,9 +1202,7 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
-
- radio_cfg = priv->eeprom_data->radio_cfg;
+ IWL_DEBUG_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
priv->hw_params.tx_chains_num =
num_of_ant(priv->eeprom_data->valid_tx_ant);
@@ -1218,9 +1212,9 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
priv->hw_params.rx_chains_num =
num_of_ant(priv->eeprom_data->valid_rx_ant);
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- priv->eeprom_data->valid_tx_ant,
- priv->eeprom_data->valid_rx_ant);
+ IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ priv->eeprom_data->valid_tx_ant,
+ priv->eeprom_data->valid_rx_ant);
return 0;
}
@@ -1235,7 +1229,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_op_mode *op_mode;
u16 num_mac;
u32 ucode_flags;
- struct iwl_trans_config trans_cfg;
+ struct iwl_trans_config trans_cfg = {};
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
REPLY_RX_MPDU_CMD,
@@ -1334,6 +1328,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/* Configure transport layer */
iwl_trans_configure(priv->trans, &trans_cfg);
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+
/* At this point both hw and priv are allocated. */
SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
@@ -1508,10 +1505,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_tt_exit(priv);
- /*This will stop the queues, move the device to low power state */
- priv->ucode_loaded = false;
- iwl_trans_stop_device(priv->trans);
-
kfree(priv->eeprom_blob);
iwl_free_eeprom_data(priv->eeprom_data);
@@ -1927,8 +1920,6 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
* commands by clearing the ready bit */
clear_bit(STATUS_READY, &priv->status);
- wake_up(&priv->trans->wait_command_queue);
-
if (!ondemand) {
/*
* If firmware keep reloading, then it indicate something
@@ -2152,8 +2143,6 @@ static int __init iwl_init(void)
{
int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
ret = iwlagn_rate_control_register();
if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 5a9c325804f6..cac4f37cc427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -631,8 +631,6 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->trans->wait_command_queue);
return 0;
}
@@ -901,7 +899,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_device_cmd *cmd)
{
struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status rx_status = {};
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
@@ -951,7 +949,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
+ /*rx_status.flag |= RX_FLAG_MACTIME_START;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index f5ca73a89870..4ae031f6726b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1075,14 +1075,11 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
static void iwlagn_set_tx_status(struct iwl_priv *priv,
struct ieee80211_tx_info *info,
- struct iwlagn_tx_resp *tx_resp,
- bool is_agg)
+ struct iwlagn_tx_resp *tx_resp)
{
- u16 status = le16_to_cpu(tx_resp->status.status);
+ u16 status = le16_to_cpu(tx_resp->status.status);
info->status.rates[0].count = tx_resp->failure_frame + 1;
- if (is_agg)
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
info);
@@ -1231,7 +1228,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (is_agg && !iwl_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
- tx_resp, is_agg);
+ tx_resp);
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 2cb1efbc5ed1..95e6d33f5159 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -254,7 +254,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
int ret;
int i;
- iwl_trans_fw_alive(priv->trans);
+ iwl_trans_fw_alive(priv->trans, 0);
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 87f465a49df1..196266aa5a9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -150,7 +150,7 @@ enum iwl_led_mode {
struct iwl_base_params {
int eeprom_size;
int num_of_queues; /* def: HW dependent */
- /* for iwl_apm_init() */
+ /* for iwl_pcie_apm_init() */
u32 pll_cfg_val;
const u16 max_ll_items;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 59a5f78402fc..b3fde5f7b9bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -25,6 +25,39 @@
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+#if !defined(__IWLWIFI_DEVICE_TRACE)
+static inline bool iwl_trace_data(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (ieee80211_is_data(hdr->frame_control))
+ return skb->protocol != cpu_to_be16(ETH_P_PAE);
+ return false;
+}
+
+static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+ void *rxbuf, size_t len)
+{
+ struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
+ struct ieee80211_hdr *hdr;
+
+ if (cmd->cmd != trans->rx_mpdu_cmd)
+ return len;
+
+ hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
+ trans->rx_mpdu_cmd_hdr_size);
+ if (!ieee80211_is_data(hdr->frame_control))
+ return len;
+ /* maybe try to identify EAPOL frames? */
+ return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+ ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
@@ -235,6 +268,48 @@ TRACE_EVENT(iwlwifi_dbg,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+ TP_PROTO(const struct device *dev,
+ struct sk_buff *skb,
+ void *data, size_t data_len),
+ TP_ARGS(dev, skb, data, data_len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ if (iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(data), data, data_len);
+ ),
+ TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+ TP_PROTO(const struct device *dev,
+ const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+
+ __dynamic_array(u8, data,
+ len - iwl_rx_trace_len(trans, rxbuf, len))
+ ),
+ TP_fast_assign(
+ size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+ DEV_ASSIGN;
+ if (offs < len)
+ memcpy(__get_dynamic_array(data),
+ ((u8 *)rxbuf) + offs, len - offs);
+ ),
+ TP_printk("[%s] RX frame data", __get_str(dev))
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
@@ -270,25 +345,28 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(const struct device *dev, void *rxbuf, size_t len),
- TP_ARGS(dev, rxbuf, len),
+ TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+ void *rxbuf, size_t len),
+ TP_ARGS(dev, trans, rxbuf, len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, rxbuf, len)
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
),
TP_fast_assign(
DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
+ memcpy(__get_dynamic_array(rxbuf), rxbuf,
+ iwl_rx_trace_len(trans, rxbuf, len));
),
TP_printk("[%s] RX cmd %#.2x",
__get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen,
+ TP_PROTO(const struct device *dev, struct sk_buff *skb,
+ void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
- TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
+ TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
TP_STRUCT__entry(
DEV_ENTRY
@@ -301,14 +379,15 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
+ __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
),
TP_fast_assign(
DEV_ASSIGN;
__entry->framelen = buf0_len + buf1_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
+ if (!iwl_trace_data(skb))
+ memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
),
TP_printk("[%s] TX %.2x (%zu bytes)",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index f10170fe8799..4a9dc9629efe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -889,8 +889,8 @@ int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
{
if (data->eeprom_version >= trans->cfg->eeprom_ver ||
data->calib_version >= trans->cfg->eeprom_calib_ver) {
- IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- data->eeprom_version, data->calib_version);
+ IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ data->eeprom_version, data->calib_version);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 806046641747..ec48563d3c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -267,7 +267,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
+#define RX_RB_TIMEOUT (0x11)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3dfebfb8434f..54c41b44bffe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -327,11 +327,11 @@ u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+ const void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
- u32 *vals = buf;
+ const u32 *vals = buf;
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 50d3819739d1..e1aa69f66de6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -87,7 +87,7 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
} while (0)
int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
+ const void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 9253ef1dba72..c3a4bb41e533 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -213,6 +213,9 @@
#define SCD_CONTEXT_QUEUE_OFFSET(x)\
(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+#define SCD_TX_STTS_QUEUE_OFFSET(x)\
+ (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index ff1154232885..e378ea6dca9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -221,14 +221,21 @@ struct iwl_device_cmd {
/**
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
- * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
* rather copies it to an previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
- * buffer. This can save memcpy and is worth with very big comamnds.
+ * buffer (that is passed in) instead. This saves the memcpy and allows
+ * commands that are bigger than the fixed buffer to be submitted.
+ * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ * chunk internally and free it again after the command completes. This
+ * can (currently) be used only once per command.
+ * Note that a TFD entry after a DUP one cannot be a normal copied one.
*/
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
+ IWL_HCMD_DFL_DUP = BIT(1),
};
/**
@@ -348,14 +355,17 @@ struct iwl_trans;
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kick a fw image.
* May sleep
- * @fw_alive: called when the fw sends alive notification
+ * @fw_alive: called when the fw sends alive notification. If the fw provides
+ * the SCD base address in SRAM, then provide it here, or 0 otherwise.
* May sleep
* @stop_device:stops the whole device (embedded CPU put to reset)
* May sleep
* @wowlan_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
- * @send_cmd:send a host command
+ * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
+ * If RFkill is asserted in the middle of a SYNC host command, it must
+ * return -ERFKILL straight away.
* May sleep only if CMD_SYNC is set
* @tx: send an skb
* Must be atomic
@@ -385,7 +395,7 @@ struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
- void (*fw_alive)(struct iwl_trans *trans);
+ void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans);
void (*wowlan_suspend)(struct iwl_trans *trans);
@@ -438,12 +448,15 @@ enum iwl_trans_state {
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
- * @wait_command_queue: the wait_queue for SYNC host commands
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
* @dev_cmd_headroom: room needed for the transport's private use before the
* device_cmd for Tx - for internal use only
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ * starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ * start of the 802.11 header in the @rx_mpdu_cmd
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -457,9 +470,9 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
- bool pm_support;
+ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
- wait_queue_head_t wait_command_queue;
+ bool pm_support;
/* The following fields are internal only */
struct kmem_cache *dev_cmd_pool;
@@ -502,13 +515,13 @@ static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
trans->state = IWL_TRANS_NO_FW;
}
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
might_sleep();
trans->state = IWL_TRANS_FW_ALIVE;
- trans->ops->fw_alive(trans);
+ trans->ops->fw_alive(trans, scd_addr);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -516,6 +529,8 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
{
might_sleep();
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
return trans->ops->start_fw(trans, fw);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2a4675396707..956fe6c370bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -69,7 +69,6 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
-#include "iwl-trans.h"
#include "cfg.h"
#include "internal.h"
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 401178f44a3b..d91d2e8c62f5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics {
};
/**
- * struct iwl_rx_queue - Rx queue
+ * struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @pool:
@@ -91,7 +91,7 @@ struct isr_statistics {
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
-struct iwl_rx_queue {
+struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@ struct iwl_cmd_meta {
* 32 since we don't need so many commands pending. Since the HW
* still uses 256 BDs for DMA though, n_bd stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
* This means that we end up with the following:
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
* SW entries: | 0 | ... | 31 |
@@ -182,15 +182,17 @@ struct iwl_queue {
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
-struct iwl_pcie_tx_queue_entry {
+struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
struct iwl_device_cmd *copy_cmd;
struct sk_buff *skb;
+ /* buffer to free after command completes */
+ const void *free_buf;
struct iwl_cmd_meta meta;
};
/**
- * struct iwl_tx_queue - Tx Queue for DMA
+ * struct iwl_txq - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
* @entries: transmit entries (driver state)
@@ -203,10 +205,10 @@ struct iwl_pcie_tx_queue_entry {
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
*/
-struct iwl_tx_queue {
+struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
- struct iwl_pcie_tx_queue_entry *entries;
+ struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
struct timer_list stuck_timer;
struct iwl_trans_pcie *trans_pcie;
@@ -236,7 +238,7 @@ struct iwl_tx_queue {
* @wd_timeout: queue watchdog timeout (jiffies)
*/
struct iwl_trans_pcie {
- struct iwl_rx_queue rxq;
+ struct iwl_rxq rxq;
struct work_struct rx_replenish;
struct iwl_trans *trans;
struct iwl_drv *drv;
@@ -258,7 +260,7 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
@@ -268,6 +270,8 @@ struct iwl_trans_pcie {
bool ucode_write_complete;
wait_queue_head_t ucode_write_waitq;
+ wait_queue_head_t wait_command_queue;
+
unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
@@ -283,13 +287,23 @@ struct iwl_trans_pcie {
unsigned long wd_timeout;
};
-/*****************************************************
-* DRIVER STATUS FUNCTIONS
-******************************************************/
-#define STATUS_HCMD_ACTIVE 0
-#define STATUS_DEVICE_ENABLED 1
-#define STATUS_TPOWER_PMI 2
-#define STATUS_INT_ENABLED 3
+/**
+ * enum iwl_pcie_status: status of the PCIe transport
+ * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL: the HW RFkill switch is in KILL position
+ * @STATUS_FW_ERROR: the fw is in error state
+ */
+enum iwl_pcie_status {
+ STATUS_HCMD_ACTIVE,
+ STATUS_DEVICE_ENABLED,
+ STATUS_TPOWER_PMI,
+ STATUS_INT_ENABLED,
+ STATUS_RFKILL,
+ STATUS_FW_ERROR,
+};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
@@ -301,6 +315,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
trans_specific);
}
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ * Other functions: iwl_pcie_XXX
+ */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg *cfg);
@@ -309,50 +327,43 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwl_rx_replenish(struct iwl_trans *trans);
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q);
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+void iwl_pcie_tasklet(struct iwl_trans *trans);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
-* ICT
+* ICT - interrupt handling
******************************************************/
-void iwl_reset_ict(struct iwl_trans *trans);
-void iwl_disable_ict(struct iwl_trans *trans);
-int iwl_alloc_isr_ict(struct iwl_trans *trans);
-void iwl_free_isr_ict(struct iwl_trans *trans);
-irqreturn_t iwl_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans,
- struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_tx_cmd_complete(struct iwl_trans *trans,
- struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir);
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
- struct sk_buff_head *skbs);
-int iwl_queue_space(const struct iwl_queue *q);
-
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs);
/*****************************************************
* Error handling
******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf);
-void iwl_dump_csr(struct iwl_trans *trans);
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
@@ -388,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -399,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_txq *txq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -411,7 +422,7 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
txq->q.id);
}
-static inline int iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
@@ -423,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
return index & (q->n_window - 1);
}
-static inline const char *
-trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+ u8 cmd)
{
if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index bb69f8f90b3b..bb32510fdd62 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
*
* Driver sequence:
*
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc() Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
+ * are available, schedules iwl_pcie_rx_replenish
*
* -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
+ * Calls iwl_pcie_rxq_restock to refill any empty
* slots.
* ...
*
*/
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
*/
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
{
int s = q->read - q->write;
if (s <= 0)
@@ -122,11 +122,28 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
return s;
}
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
unsigned long flags;
u32 reg;
@@ -176,16 +193,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
spin_unlock_irqrestore(&q->lock, flags);
}
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
* and we have free pre-allocated buffers, fill the ranks as much
@@ -195,11 +204,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct list_head *element;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
@@ -215,18 +223,18 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
return;
spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
BUG_ON(rxb && rxb->page);
/* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
+ rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -243,24 +251,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(trans, rxq);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
/*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
* a page must be allocated and the RBD must point to the page. This function
* doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct list_head *element;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
unsigned long flags;
@@ -308,10 +315,9 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
__free_pages(page, trans_pcie->rx_page_order);
return;
}
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
+ rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
spin_unlock_irqrestore(&rxq->lock, flags);
BUG_ON(rxb->page);
@@ -343,47 +349,227 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
}
}
+static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rxq->pool[i].page,
+ trans_pcie->rx_page_order);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
/*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
*
* When moving to rx_free an page is allocated for the slot.
*
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
* This is called as a scheduled work item (except for during initialization)
*/
-void iwl_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
- iwl_rx_allocate(trans, GFP_KERNEL);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
- iwl_rx_allocate(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
}
-void iwl_bg_rx_replenish(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
struct iwl_trans_pcie *trans_pcie =
container_of(data, struct iwl_trans_pcie, rx_replenish);
- iwl_rx_replenish(trans_pcie->trans);
+ iwl_pcie_rx_replenish(trans_pcie->trans);
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct device *dev = trans->dev;
+
+ memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+ spin_lock_init(&rxq->lock);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ /*Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+ if (trans_pcie->rx_buf_size_8k)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size|
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_pcie_rx_alloc(trans);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ INIT_WORK(&trans_pcie->rx_replenish,
+ iwl_pcie_rx_replenish_work);
+
+ iwl_pcie_rxq_free_rbs(trans);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwl_pcie_rx_replenish(trans);
+
+ iwl_pcie_rx_hw_init(trans, rxq);
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ rxq->need_update = 1;
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ return 0;
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ unsigned long flags;
+
+ /*if rxq->bd is NULL, it means that nothing has been allocated,
+ * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_pcie_rxq_free_rbs(trans);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
}
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -413,13 +599,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
break;
IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
- rxcb._offset,
- trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+ rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
pkt->hdr.cmd);
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+ trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+ trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -445,7 +631,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
cmd_index = get_cmd_index(&txq->q, index);
if (reclaim) {
- struct iwl_pcie_tx_queue_entry *ent;
+ struct iwl_pcie_txq_entry *ent;
ent = &txq->entries[cmd_index];
cmd = ent->copy_cmd;
WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -459,6 +645,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
/* The original command isn't needed any more */
kfree(txq->entries[cmd_index].copy_cmd);
txq->entries[cmd_index].copy_cmd = NULL;
+ /* nor is the duplicated part of the command */
+ kfree(txq->entries[cmd_index].free_buf);
+ txq->entries[cmd_index].free_buf = NULL;
}
/*
@@ -472,7 +661,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
* iwl_trans_send_cmd()
* as we reclaim the driver command queue */
if (!rxcb._page_stolen)
- iwl_tx_cmd_complete(trans, &rxcb, err);
+ iwl_pcie_hcmd_complete(trans, &rxcb, err);
else
IWL_WARN(trans, "Claim null rxb?\n");
}
@@ -514,17 +703,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
spin_unlock_irqrestore(&rxq->lock, flags);
}
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 r, i;
u8 fill_rx = 0;
u32 count = 8;
@@ -555,7 +740,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
r, i, rxb);
- iwl_rx_handle_rxbuf(trans, rxb);
+ iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
/* If there are a lot of unused frames,
@@ -564,7 +749,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
count++;
if (count >= 8) {
rxq->read = i;
- iwl_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish_now(trans);
count = 0;
}
}
@@ -573,39 +758,41 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Backtrack one entry */
rxq->read = i;
if (fill_rx)
- iwl_rx_replenish_now(trans);
+ iwl_pcie_rx_replenish_now(trans);
else
- iwl_rx_queue_restock(trans);
+ iwl_pcie_rxq_restock(trans);
}
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
*/
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
APMG_PS_CTRL_VAL_RESET_REQ))) {
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
iwl_op_mode_wimax_active(trans->op_mode);
- wake_up(&trans->wait_command_queue);
+ wake_up(&trans_pcie->wait_command_queue);
return;
}
- iwl_dump_csr(trans);
- iwl_dump_fh(trans, NULL);
+ iwl_pcie_dump_csr(trans);
+ iwl_pcie_dump_fh(trans, NULL);
+
+ set_bit(STATUS_FW_ERROR, &trans_pcie->status);
+ clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+ wake_up(&trans_pcie->wait_command_queue);
iwl_op_mode_nic_error(trans->op_mode);
}
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -657,7 +844,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
iwl_disable_interrupts(trans);
isr_stats->hw++;
- iwl_irq_handle_error(trans);
+ iwl_pcie_irq_handle_error(trans);
handled |= CSR_INT_BIT_HW_ERR;
@@ -694,6 +881,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->rfkill++;
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL, &trans_pcie->status);
+ if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
+ }
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -710,17 +907,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
IWL_ERR(trans, "Microcode SW error detected. "
" Restarting 0x%X.\n", inta);
isr_stats->sw++;
- iwl_irq_handle_error(trans);
+ iwl_pcie_irq_handle_error(trans);
handled |= CSR_INT_BIT_SW_ERR;
}
/* uCode wakes up after power-down sleep */
if (inta & CSR_INT_BIT_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+ iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
- iwl_txq_update_write_ptr(trans,
- &trans_pcie->txq[i]);
+ iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
isr_stats->wakeup++;
@@ -758,7 +954,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
iwl_write8(trans, CSR_INT_PERIODIC_REG,
CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(trans);
+ iwl_pcie_rx_handle(trans);
/*
* Enable periodic interrupt in 8 msec only if we received
@@ -816,7 +1012,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
#define ICT_COUNT (ICT_SIZE / sizeof(u32))
/* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -829,13 +1025,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
}
}
-
/*
* allocate dram shared table, it is an aligned memory
* block of ICT_SIZE.
* also reset all data related to ICT table interrupt.
*/
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -848,7 +1043,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
/* just an API sanity check ... it is guaranteed to be aligned */
if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
return -EINVAL;
}
@@ -869,7 +1064,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
/* Device is going up inform it about using ICT interrupt table,
* also we need to tell the driver to start using ICT interrupt.
*/
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@@ -899,7 +1094,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
}
/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
@@ -910,7 +1105,7 @@ void iwl_disable_ict(struct iwl_trans *trans)
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
+static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -957,7 +1152,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
#endif
trans_pcie->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ /* iwl_pcie_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
@@ -982,7 +1177,7 @@ none:
* the interrupt we need to service, driver will set the entries back to 0 and
* set index.
*/
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie;
@@ -1002,14 +1197,13 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* use legacy interrupt.
*/
if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_isr(irq, data);
+ irqreturn_t ret = iwl_pcie_isr(irq, data);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return ret;
}
trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
@@ -1018,7 +1212,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
@@ -1067,7 +1260,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
inta &= trans_pcie->inta_mask;
trans_pcie->inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ /* iwl_pcie_tasklet() will service interrupts and re-enable them */
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index fe0fffd04304..f6c21e7edaf2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,584 +74,8 @@
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
-/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "dvm/commands.h"
-#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
- (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
- (~(1<<(trans_pcie)->cmd_queue)))
-
-static int iwl_trans_rx_alloc(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- struct device *dev = trans->dev;
-
- memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
-
- spin_lock_init(&rxq->lock);
-
- if (WARN_ON(rxq->bd || rxq->rb_stts))
- return -EINVAL;
-
- /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb_stts;
-
- return 0;
-
-err_rb_stts:
- dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-err_bd:
- return -ENOMEM;
-}
-
-static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- int i;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rxq->pool[i].page,
- trans_pcie->rx_page_order);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-}
-
-static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
- struct iwl_rx_queue *rxq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- if (trans_pcie->rx_buf_size_8k)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-}
-
-static int iwl_rx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
- int i, err;
- unsigned long flags;
-
- if (!rxq->bd) {
- err = iwl_trans_rx_alloc(trans);
- if (err)
- return err;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- iwl_trans_rxq_free_rx_bufs(trans);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- iwl_rx_replenish(trans);
-
- iwl_trans_rx_hw_init(trans, rxq);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- return 0;
-}
-
-static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
- unsigned long flags;
-
- /*if rxq->bd is NULL, it means that nothing has been allocated,
- * exit now */
- if (!rxq->bd) {
- IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- iwl_trans_rxq_free_rx_bufs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
- rxq->rb_stts = NULL;
-}
-
-static int iwl_trans_rx_stop(struct iwl_trans *trans)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-}
-
-static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- if (WARN_ON(ptr->addr))
- return -EINVAL;
-
- ptr->addr = dma_alloc_coherent(trans->dev, size,
- &ptr->dma, GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
-{
- struct iwl_tx_queue *txq = (void *)data;
- struct iwl_queue *q = &txq->q;
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- u32 scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
- u8 buf[16];
- int i;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->q.read_ptr == txq->q.write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
- jiffies_to_msecs(trans_pcie->wd_timeout));
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
-
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
- iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
-
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
-
- if (i & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
-
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans,
- SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
- }
-
- for (i = q->read_ptr; i != q->write_ptr;
- i = iwl_queue_inc_wrap(i, q->n_bd)) {
- struct iwl_tx_cmd *tx_cmd =
- (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
- IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
- get_unaligned_le32(&tx_cmd->scratch));
- }
-
- iwl_op_mode_nic_error(trans->op_mode);
-}
-
-static int iwl_trans_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, int slots_num,
- u32 txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
- int i;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
- (unsigned long)txq);
- txq->trans_pcie = trans_pcie;
-
- txq->q.n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_tx_queue_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = txq_id;
-
- return 0;
-error:
- if (txq->entries && txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-
-}
-
-static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int ret;
-
- txq->need_update = 0;
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
- txq_id);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- /*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- * Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/**
- * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- enum dma_data_direction dma_dir;
-
- if (!q->n_bd)
- return;
-
- /* In the command queue, all the TBs are mapped as BIDI
- * so unmap them as such.
- */
- if (txq_id == trans_pcie->cmd_queue)
- dma_dir = DMA_BIDIRECTIONAL;
- else
- dma_dir = DMA_TO_DEVICE;
-
- spin_lock_bh(&txq->lock);
- while (q->write_ptr != q->read_ptr) {
- iwl_txq_free_tfd(trans, txq, dma_dir);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
- spin_unlock_bh(&txq->lock);
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct device *dev = trans->dev;
- int i;
-
- if (WARN_ON(!txq))
- return;
-
- iwl_tx_queue_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans_pcie->cmd_queue)
- for (i = 0; i < txq->q.n_window; i++) {
- kfree(txq->entries[i].cmd);
- kfree(txq->entries[i].copy_cmd);
- }
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd) {
- dma_free_coherent(dev, sizeof(struct iwl_tfd) *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
- }
-
- kfree(txq->entries);
- txq->entries = NULL;
-
- del_timer_sync(&txq->stuck_timer);
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_trans_tx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
-{
- int txq_id;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- /* Tx queues */
- if (trans_pcie->txq) {
- for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
- iwl_tx_queue_free(trans, txq_id);
- }
-
- kfree(trans_pcie->txq);
- trans_pcie->txq = NULL;
-
- iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
-
- iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
-}
-
-/**
- * iwl_trans_tx_alloc - allocate TX context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-static int iwl_trans_tx_alloc(struct iwl_trans *trans)
-{
- int ret;
- int txq_id, slots_num;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
- sizeof(struct iwlagn_scd_bc_tbl);
-
- /*It is not allowed to alloc twice, so warn when this happens.
- * We cannot rely on the previous allocation, so free and fail */
- if (WARN_ON(trans_pcie->txq)) {
- ret = -EINVAL;
- goto error;
- }
-
- ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
- scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- goto error;
- }
-
- /* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(trans, "Keep Warm allocation failed\n");
- goto error;
- }
-
- trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_tx_queue), GFP_KERNEL);
- if (!trans_pcie->txq) {
- IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
- goto error;
- }
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-
-error:
- iwl_trans_pcie_tx_free(trans);
-
- return ret;
-}
-static int iwl_tx_init(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
- bool alloc = false;
-
- if (!trans_pcie->txq) {
- ret = iwl_trans_tx_alloc(trans);
- if (ret)
- goto error;
- alloc = true;
- }
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl_write_prph(trans, SCD_TXFACT, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
- trans_pcie->kw.dma >> 4);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++) {
- slots_num = (txq_id == trans_pcie->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
- slots_num, txq_id);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return 0;
-error:
- /*Upon error, free only if we allocated something */
- if (alloc)
- iwl_trans_pcie_tx_free(trans);
- return ret;
-}
-
-static void iwl_set_pwr_vmain(struct iwl_trans *trans)
+static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
/*
* (for documentation purposes)
@@ -673,18 +97,11 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
+static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 pci_lnk_ctl;
-
- pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
- &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
+ u16 lctl;
-static void iwl_apm_config(struct iwl_trans *trans)
-{
/*
* HW bug W/A for instability in PCIe bus L0S->L1 transition.
* Check if BIOS (or OS) enabled L1-ASPM on this device.
@@ -693,29 +110,27 @@ static void iwl_apm_config(struct iwl_trans *trans)
* If not (unlikely), enable L0S, so there is at least some
* power savings, even without L1.
*/
- u16 lctl = iwl_pciexp_link_ctrl(trans);
+ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, trans->dev,
- "L1 Enabled; Disabling L0S\n");
+ dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- dev_printk(KERN_INFO, trans->dev,
- "L1 Disabled; Enabling L0S\n");
+ dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
}
trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
}
/*
* Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_apm_stop())
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
* NOTE: This does not load uCode nor start the embedded processor
*/
-static int iwl_apm_init(struct iwl_trans *trans)
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret = 0;
@@ -747,7 +162,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
- iwl_apm_config(trans);
+ iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
if (trans->cfg->base_params->pll_cfg_val)
@@ -793,7 +208,7 @@ out:
return ret;
}
-static int iwl_apm_stop_master(struct iwl_trans *trans)
+static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
@@ -811,7 +226,7 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
return ret;
}
-static void iwl_apm_stop(struct iwl_trans *trans)
+static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
@@ -819,7 +234,7 @@ static void iwl_apm_stop(struct iwl_trans *trans)
clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
/* Stop device's DMA activity */
- iwl_apm_stop_master(trans);
+ iwl_pcie_apm_stop_master(trans);
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -834,29 +249,29 @@ static void iwl_apm_stop(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-static int iwl_nic_init(struct iwl_trans *trans)
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
/* nic_init */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- iwl_apm_init(trans);
+ iwl_pcie_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_set_pwr_vmain(trans);
+ iwl_pcie_set_pwr_vmain(trans);
iwl_op_mode_nic_config(trans->op_mode);
/* Allocate the RX queue, or reset if it is already allocated */
- iwl_rx_init(trans);
+ iwl_pcie_rx_init(trans);
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_tx_init(trans))
+ if (iwl_pcie_tx_init(trans))
return -ENOMEM;
if (trans->cfg->base_params->shadow_reg_enable) {
@@ -871,7 +286,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
#define HW_READY_TIMEOUT (50)
/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_trans *trans)
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
int ret;
@@ -889,14 +304,14 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
}
/* Note: returns standard 0/-ERROR code */
-static int iwl_prepare_card_hw(struct iwl_trans *trans)
+static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
int ret;
int t = 0;
IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
- ret = iwl_set_hw_ready(trans);
+ ret = iwl_pcie_set_hw_ready(trans);
/* If the card is ready, exit 0 */
if (ret >= 0)
return 0;
@@ -906,7 +321,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
CSR_HW_IF_CONFIG_REG_PREPARE);
do {
- ret = iwl_set_hw_ready(trans);
+ ret = iwl_pcie_set_hw_ready(trans);
if (ret >= 0)
return 0;
@@ -920,7 +335,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
/*
* ucode
*/
-static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
dma_addr_t phy_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -967,7 +382,7 @@ static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
return 0;
}
-static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
const struct fw_desc *section)
{
u8 *v_addr;
@@ -988,8 +403,9 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
memcpy(v_addr, (u8 *)section->data + offset, copy_size);
- ret = iwl_load_firmware_chunk(trans, section->offset + offset,
- p_addr, copy_size);
+ ret = iwl_pcie_load_firmware_chunk(trans,
+ section->offset + offset,
+ p_addr, copy_size);
if (ret) {
IWL_ERR(trans,
"Could not load the [%d] uCode section\n",
@@ -1002,7 +418,7 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_load_given_ucode(struct iwl_trans *trans,
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
int i, ret = 0;
@@ -1011,7 +427,7 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
if (!image->sec[i].data)
break;
- ret = iwl_load_section(trans, i, &image->sec[i]);
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
if (ret)
return ret;
}
@@ -1025,15 +441,18 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
bool hw_rfkill;
/* This may fail if AMT took ownership of the device */
- if (iwl_prepare_card_hw(trans)) {
+ if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
return -EIO;
}
+ clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
+
iwl_enable_rfkill_int(trans);
/* If platform's RF_KILL switch is NOT set to KILL */
@@ -1044,7 +463,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
- ret = iwl_nic_init(trans);
+ ret = iwl_pcie_nic_init(trans);
if (ret) {
IWL_ERR(trans, "Unable to init nic\n");
return ret;
@@ -1064,125 +483,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- return iwl_load_given_ucode(trans, fw);
+ return iwl_pcie_load_given_ucode(trans, fw);
}
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- */
-static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
- struct iwl_trans_pcie __maybe_unused *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
- iwl_write_prph(trans, SCD_TXFACT, mask);
-}
-
-static void iwl_tx_start(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
- int chan;
- u32 reg_val;
-
- /* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
- trans_pcie->scd_base_addr =
- iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
-
- iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans_pcie->scd_bc_tbls.dma >> 10);
-
- /* The chain extension of the SCD doesn't work well. This feature is
- * enabled by default by the HW, so we need to disable it manually.
- */
- iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
-
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
- trans_pcie->cmd_fifo);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-}
-
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
-{
- iwl_reset_ict(trans);
- iwl_tx_start(trans);
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-static int iwl_trans_tx_stop(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ch, txq_id, ret;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- iwl_trans_txq_set_sched(trans, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
- if (ret < 0)
- IWL_ERR(trans,
- "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
- ch,
- iwl_read_direct32(trans,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
- return 0;
- }
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
- txq_id++)
- iwl_tx_queue_unmap(trans, txq_id);
-
- return 0;
+ iwl_pcie_reset_ict(trans);
+ iwl_pcie_tx_start(trans, scd_addr);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1196,7 +503,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
/* device going down, Stop using ICT table */
- iwl_disable_ict(trans);
+ iwl_pcie_disable_ict(trans);
/*
* If a HW restart happens during firmware loading,
@@ -1206,8 +513,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* already dead.
*/
if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
- iwl_trans_tx_stop(trans);
- iwl_trans_rx_stop(trans);
+ iwl_pcie_tx_stop(trans);
+ iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
iwl_write_prph(trans, APMG_CLK_DIS_REG,
@@ -1220,7 +527,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
- iwl_apm_stop(trans);
+ iwl_pcie_apm_stop(trans);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
@@ -1245,6 +552,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1258,169 +566,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
-static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u8 wait_write_ptr = 0;
- __le16 fc = hdr->frame_control;
- u8 hdr_len = ieee80211_hdrlen(fc);
- u16 __maybe_unused wifi_seq;
-
- txq = &trans_pcie->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- spin_lock(&txq->lock);
-
- /* In AGG mode, the index in the ring must correspond to the WiFi
- * sequence number. This is a HW requirements to help the SCD to parse
- * the BA.
- * Check here that the packets are in the right place on the ring.
- */
-#ifdef CONFIG_IWLWIFI_DEBUG
- wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
- ((wifi_seq & 0xff) != q->write_ptr),
- "Q: %d WiFi Seq %d tfdNum %d",
- txq_id, wifi_seq, q->write_ptr);
-#endif
-
- /* Set up driver data for this TFD */
- txq->entries[q->write_ptr].skb = skb;
- txq->entries[q->write_ptr].cmd = dev_cmd;
-
- dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[q->write_ptr].meta;
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = dma_map_single(trans->dev,
- &dev_cmd->hdr, firstlen,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
- goto out_err;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
-
- if (!ieee80211_has_morefrags(fc)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
- secondlen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
- dma_unmap_single(trans->dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- DMA_BIDIRECTIONAL);
- goto out_err;
- }
- }
-
- /* Attach buffers to TFD */
- iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
- secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
-
- dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(trans->dev,
- &txq->tfds[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &dev_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* start timer if queue currently empty */
- if (txq->need_update && q->read_ptr == q->write_ptr &&
- trans_pcie->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(trans, txq);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
- if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr) {
- txq->need_update = 1;
- iwl_txq_update_write_ptr(trans, txq);
- } else {
- iwl_stop_queue(trans, txq);
- }
- }
- spin_unlock(&txq->lock);
- return 0;
- out_err:
- spin_unlock(&txq->lock);
- return -1;
-}
-
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1431,29 +576,28 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
if (!trans_pcie->irq_requested) {
tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)trans);
+ iwl_pcie_tasklet, (unsigned long)trans);
- iwl_alloc_isr_ict(trans);
+ iwl_pcie_alloc_ict(trans);
- err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
+ IRQF_SHARED, DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n",
trans_pcie->irq);
goto error;
}
- INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
trans_pcie->irq_requested = true;
}
- err = iwl_prepare_card_hw(trans);
+ err = iwl_pcie_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
goto err_free_irq;
}
- iwl_apm_init(trans);
+ iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
@@ -1467,7 +611,7 @@ err_free_irq:
trans_pcie->irq_requested = false;
free_irq(trans_pcie->irq, trans);
error:
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
tasklet_kill(&trans_pcie->irq_tasklet);
return err;
}
@@ -1483,7 +627,7 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
iwl_disable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- iwl_apm_stop(trans);
+ iwl_pcie_apm_stop(trans);
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_disable_interrupts(trans);
@@ -1507,28 +651,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
}
}
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- /* n_bd is usually 256 => n_bd - 1 = 0xff */
- int tfd_num = ssn & (txq->q.n_bd - 1);
- int freed = 0;
-
- spin_lock(&txq->lock);
-
- if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->q.read_ptr, tfd_num, ssn);
- freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
- iwl_wake_queue(trans, txq);
- }
-
- spin_unlock(&txq->lock);
-}
-
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1575,12 +697,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_trans_pcie_tx_free(trans);
- iwl_trans_pcie_rx_free(trans);
+ iwl_pcie_tx_free(trans);
+ iwl_pcie_rx_free(trans);
if (trans_pcie->irq_requested == true) {
free_irq(trans_pcie->irq, trans);
- iwl_free_isr_ict(trans);
+ iwl_pcie_free_ict(trans);
}
pci_disable_msi(trans_pcie->pci_dev);
@@ -1626,10 +748,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
@@ -1673,7 +795,7 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD
}
-int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
static const u32 fh_tbl[] = {
@@ -1752,7 +874,7 @@ static const char *get_csr_string(int cmd)
#undef IWL_CMD
}
-void iwl_dump_csr(struct iwl_trans *trans)
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
int i;
static const u32 csr_tbl[] = {
@@ -1809,7 +931,6 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
@@ -1842,7 +963,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq;
+ struct iwl_txq *txq;
struct iwl_queue *q;
char *buf;
int pos = 0;
@@ -1879,7 +1000,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
char buf[256];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1998,7 +1119,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
if (sscanf(buf, "%d", &csr) != 1)
return -EFAULT;
- iwl_dump_csr(trans);
+ iwl_pcie_dump_csr(trans);
return count;
}
@@ -2012,7 +1133,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_dump_fh(trans, &buf);
+ ret = pos = iwl_pcie_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
@@ -2081,7 +1202,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
- .send_cmd = iwl_trans_pcie_send_cmd,
+ .send_cmd = iwl_trans_pcie_send_hcmd,
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_trans_pcie_reclaim,
@@ -2091,7 +1212,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
- .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
+ .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
#ifdef CONFIG_PM_SLEEP
.suspend = iwl_trans_pcie_suspend,
@@ -2116,7 +1237,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
- if (WARN_ON(!trans))
+ if (!trans)
return NULL;
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2149,43 +1270,38 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
- dev_printk(KERN_ERR, &pdev->dev,
- "No suitable DMA available.\n");
+ dev_err(&pdev->dev, "No suitable DMA available\n");
goto out_pci_disable_device;
}
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_printk(KERN_ERR, &pdev->dev,
- "pci_request_regions failed\n");
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
goto out_pci_disable_device;
}
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
+ dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
goto out_pci_release_regions;
}
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_base = %p\n", trans_pcie->hw_base);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "HW Revision ID = 0x%X\n", pdev->revision);
-
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
err = pci_enable_msi(pdev);
- if (err)
- dev_printk(KERN_ERR, &pdev->dev,
- "pci_enable_msi failed(0X%x)\n", err);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
trans->dev = &pdev->dev;
trans_pcie->irq = pdev->irq;
@@ -2195,16 +1311,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
- /* TODO: Move this away, not needed if not MSI */
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- }
-
/* Initialize the wait queue for commands */
- init_waitqueue_head(&trans->wait_command_queue);
+ init_waitqueue_head(&trans_pcie->wait_command_queue);
spin_lock_init(&trans->reg_lock);
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 79a4ddc002d3..6c5b867c353a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,170 @@
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits.  If, after
+ * queuing a packet for Tx, the free space drops below the low mark, the Tx
+ * queue is stopped.  When packets are reclaimed (on the 'tx done' IRQ) and
+ * the free space rises above the high mark, the Tx queue is resumed.
+ *
+ ***************************************************/
+static int iwl_queue_space(const struct iwl_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_window;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
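As a rough stand-alone sketch of the ring bookkeeping that the "Theory of operation" comment above describes in prose: the names below (demo_queue, DEMO_RING_SIZE, demo_used, demo_space) are invented for the illustration and are not part of the driver; the two-entry reserve mirrors the "minimum of 2 empty entries" rule, and the driver's real helpers are iwl_queue_space()/iwl_queue_init() in this hunk.

#include <stdio.h>

#define DEMO_RING_SIZE 256		/* a power of two, like TFD_QUEUE_SIZE_MAX */

struct demo_queue {
	unsigned int read_ptr;		/* next entry the device will consume */
	unsigned int write_ptr;		/* next entry the driver will fill */
};

/* Entries currently handed to the device. */
static unsigned int demo_used(const struct demo_queue *q)
{
	return (q->write_ptr - q->read_ptr) & (DEMO_RING_SIZE - 1);
}

/*
 * Free entries, keeping two in reserve so a completely full ring never
 * looks identical to an empty one (read_ptr == write_ptr).
 */
static int demo_space(const struct demo_queue *q)
{
	int s = DEMO_RING_SIZE - (int)demo_used(q) - 2;

	return s > 0 ? s : 0;
}

int main(void)
{
	struct demo_queue q = { .read_ptr = 250, .write_ptr = 10 };

	printf("used=%u free=%d\n", demo_used(&q), demo_space(&q));
	return 0;
}

With read_ptr at 250 and write_ptr wrapped around to 10, the sketch reports 16 entries in use and 238 free.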
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+{
+ q->n_bd = count;
+ q->n_window = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
+ * and iwl_queue_dec_wrap are broken. */
+ if (WARN_ON(!is_power_of_2(count)))
+ return -EINVAL;
+
+ /* slots_num must be power-of-two size, otherwise
+ * get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(trans->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+static void iwl_pcie_txq_stuck_timer(unsigned long data)
+{
+ struct iwl_txq *txq = (void *)data;
+ struct iwl_queue *q = &txq->q;
+ struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+ struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ u32 scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ u8 buf[16];
+ int i;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->q.read_ptr == txq->q.write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+ jiffies_to_msecs(trans_pcie->wd_timeout));
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_read_targ_mem(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+ if (i & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ i, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+ }
+
+ for (i = q->read_ptr; i != q->write_ptr;
+ i = iwl_queue_inc_wrap(i, q->n_bd)) {
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+ IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
+ get_unaligned_le32(&tx_cmd->scratch));
+ }
+
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +246,36 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_tx_cmd *tx_cmd =
+ (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->cmd_queue)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
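iwl_pcie_txq_update_byte_cnt_tbl() and iwl_pcie_txq_inval_byte_cnt_tbl() above keep the scheduler byte-count table in step with the TFD ring: each 16-bit little-endian entry holds a count in its low 12 bits and the station id in bits 15:12, and entries below TFD_QUEUE_SIZE_BC_DUP are written a second time just past the end of the table, presumably so the scheduler can read a contiguous window across the wrap point. A minimal model of that bookkeeping, with demo_* names and sizes invented for the illustration and the cpu_to_le16() conversion omitted:

#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUE_SIZE	256	/* stands in for TFD_QUEUE_SIZE_MAX */
#define DEMO_BC_DUP	64	/* stands in for TFD_QUEUE_SIZE_BC_DUP */

/* One byte-count table: the queue entries plus the duplicated head region. */
static uint16_t demo_bc_tbl[DEMO_QUEUE_SIZE + DEMO_BC_DUP];

/* Low 12 bits: byte count; bits 15:12: station id. */
static uint16_t demo_bc_ent(uint16_t count, uint8_t sta_id)
{
	return (uint16_t)((count & 0xFFF) | ((sta_id & 0xF) << 12));
}

static void demo_bc_set(int idx, uint16_t count, uint8_t sta_id)
{
	uint16_t ent = demo_bc_ent(count, sta_id);

	demo_bc_tbl[idx] = ent;
	/* Mirror the first DEMO_BC_DUP entries past the end of the table. */
	if (idx < DEMO_BC_DUP)
		demo_bc_tbl[DEMO_QUEUE_SIZE + idx] = ent;
}

int main(void)
{
	demo_bc_set(3, 1536, 5);
	printf("entry 3 = 0x%04x, mirror = 0x%04x\n",
	       demo_bc_tbl[3], demo_bc_tbl[DEMO_QUEUE_SIZE + 3]);
	return 0;
}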
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
u32 reg = 0;
int txq_id = txq->q.id;
@@ -137,7 +321,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
txq->need_update = 0;
}
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
@@ -149,15 +333,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
return addr;
}
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
return le16_to_cpu(tb->hi_n_len) >> 4;
}
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+ dma_addr_t addr, u16 len)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
u16 hi_n_len = len << 4;
@@ -171,19 +355,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
tfd->num_tbs = idx + 1;
}
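The tb_get_addr/tb_get_len/set_tb helpers around this hunk pack each transmit buffer pointer into lo (address bits 31:0) plus the 16-bit hi_n_len word, with the length in bits 15:4 (hence the "len << 4" and ">> 4" pair) and, assuming the usual 36-bit DMA address layout of these TFDs, address bits 35:32 in bits 3:0. A stand-alone sketch of that packing, with demo_* names invented for the illustration and endianness conversion left out:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_tb {
	uint32_t lo;		/* DMA address bits 31:0 */
	uint16_t hi_n_len;	/* bits 3:0 addr 35:32, bits 15:4 length */
};

/* len must fit in 12 bits (up to 4095 bytes per buffer). */
static void demo_pack_tb(struct demo_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | ((uint32_t)len << 4));
}

static uint64_t demo_tb_addr(const struct demo_tb *tb)
{
	return ((uint64_t)(tb->hi_n_len & 0xF) << 32) | tb->lo;
}

static unsigned int demo_tb_len(const struct demo_tb *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct demo_tb tb;

	demo_pack_tb(&tb, 0x2abcdef00ULL, 128);
	printf("addr=0x%" PRIx64 " len=%u\n", demo_tb_addr(&tb), demo_tb_len(&tb));
	return 0;
}

Packing the 36-bit address 0x2abcdef00 with a 128-byte length and reading it back yields the same values, which is exactly the round trip the driver relies on when it later unmaps the buffers.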
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
return tfd->num_tbs & 0x1f;
}
-static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
+ enum dma_data_direction dma_dir)
{
int i;
int num_tbs;
/* Sanity check on number of chunks */
- num_tbs = iwl_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -200,14 +385,14 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
- dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), dma_dir);
+ dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
+ iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
tfd->num_tbs = 0;
}
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
* @dma_dir - the direction of the DMA mapping
@@ -215,8 +400,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
@@ -227,8 +412,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
- iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
- dma_dir);
+ iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+ dma_dir);
/* free SKB */
if (txq->entries) {
@@ -247,10 +432,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
}
}
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset)
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ dma_addr_t addr, u16 len, u8 reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
@@ -263,7 +446,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
if (reset)
memset(tfd, 0, sizeof(*tfd));
- num_tbs = iwl_tfd_get_num_tbs(tfd);
+ num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
	/* Each TFD can point to a maximum of 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
@@ -279,108 +462,534 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
IWL_ERR(trans, "Unaligned address = %llx\n",
(unsigned long long)addr);
- iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+ iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
}
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For Tx queue, there are low mark and high mark limits. If, after queuing
- * the packet for Tx, free space become < low mark, Tx queue stopped. When
- * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
- * Tx queue resumed.
+static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+ struct iwl_txq *txq, int slots_num,
+ u32 txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
+ (unsigned long)txq);
+ txq->trans_pcie = trans_pcie;
+
+ txq->q.n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ if (txq->entries && txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+ /*
+	 * Tell the NIC where to find the circular buffer of Tx Frame
+	 * Descriptors for the given Tx queue, and enable the DMA channel
+	 * used for that queue.
+ * Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
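
The BUILD_BUG_ON above guards the wrap-around arithmetic used throughout this file. A minimal sketch of why a power-of-two queue size matters, assuming a mask-based wrap (illustrative only, not the driver's actual iwl_queue_inc_wrap()/iwl_queue_dec_wrap() helpers):

	/* illustrative only: with a power-of-two n_bd, wrapping an index is
	 * a single AND; with any other size this masking would be wrong */
	static inline int example_inc_wrap(int index, int n_bd)
	{
		return (index + 1) & (n_bd - 1);
	}

	static inline int example_dec_wrap(int index, int n_bd)
	{
		return (index - 1) & (n_bd - 1);
	}
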
+
+/*
+ * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ enum dma_data_direction dma_dir;
+
+ if (!q->n_bd)
+ return;
+
+ /* In the command queue, all the TBs are mapped as BIDI
+ * so unmap them as such.
+ */
+ if (txq_id == trans_pcie->cmd_queue)
+ dma_dir = DMA_BIDIRECTIONAL;
+ else
+ dma_dir = DMA_TO_DEVICE;
+
+ spin_lock_bh(&txq->lock);
+ while (q->write_ptr != q->read_ptr) {
+ iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+ spin_unlock_bh(&txq->lock);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
*
- ***************************************************/
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct device *dev = trans->dev;
+ int i;
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_pcie_txq_unmap(trans, txq_id);
-int iwl_queue_space(const struct iwl_queue *q)
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->cmd_queue)
+ for (i = 0; i < txq->q.n_window; i++) {
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries[i].copy_cmd);
+ kfree(txq->entries[i].free_buf);
+ }
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, sizeof(struct iwl_tfd) *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ del_timer_sync(&txq->stuck_timer);
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
+ */
+static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
- int s = q->read_ptr - q->write_ptr;
+ struct iwl_trans_pcie __maybe_unused *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
+ iwl_write_prph(trans, SCD_TXFACT, mask);
+}
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 a;
+ int chan;
+ u32 reg_val;
+
+	/* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ trans_pcie->scd_base_addr =
+ iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+ WARN_ON(scd_base_addr != 0 &&
+ scd_base_addr != trans_pcie->scd_base_addr);
+
+ a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+	/* reset context data memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ /* reset tx status memory */
+ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+ for (; a < trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(
+ trans->cfg->base_params->num_of_queues);
+ a += 4)
+ iwl_write_targ_mem(trans, a, 0);
+
+ iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+ trans_pcie->cmd_fifo);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+ iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
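
For reference, the deleted theory-of-operation block and iwl_queue_space() above encode a 2-entry reserve so an empty queue is never mistaken for a full one. A worked example of that rule (a trace of the removed code, not new driver code), assuming n_bd = 256 and n_window = 64:

	/* trace of the removed iwl_queue_space() for read_ptr = 200,
	 * write_ptr = 230 (30 TFDs in flight), n_bd = 256, n_window = 64 */
	static int example_queue_space(void)
	{
		int s = 200 - 230;	/* s = -30                            */

		/* read_ptr > write_ptr is false, so n_bd is not subtracted  */
		if (s <= 0)
			s += 64;	/* s = 34: free slots in the window   */
		s -= 2;			/* s = 32: 2 slots kept in reserve so */
					/* empty and full are never confused  */
		return s;
	}
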
-/**
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ch, txq_id, ret;
+ unsigned long flags;
- /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
- * and iwl_queue_dec_wrap are broken. */
- if (WARN_ON(!is_power_of_2(count)))
- return -EINVAL;
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
- /* slots_num must be power-of-two size, otherwise
- * get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
+ iwl_pcie_txq_set_sched(trans, 0);
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(trans,
+ FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+ if (ret < 0)
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch,
+ iwl_read_direct32(trans,
+ FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
+ if (!trans_pcie->txq) {
+ IWL_WARN(trans,
+ "Stopping tx queues that aren't allocated...\n");
+ return 0;
+ }
- q->write_ptr = q->read_ptr = 0;
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++)
+ iwl_pcie_txq_unmap(trans, txq_id);
return 0;
}
-static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+/*
+ * iwl_pcie_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->q.read_ptr].cmd->payload;
+ int txq_id;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+ /* Tx queues */
+ if (trans_pcie->txq) {
+ for (txq_id = 0;
+ txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
+ iwl_pcie_txq_free(trans, txq_id);
+ }
- if (txq_id != trans_pcie->cmd_queue)
- sta_id = tx_cmd->sta_id;
+ kfree(trans_pcie->txq);
+ trans_pcie->txq = NULL;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+ int ret;
+ int txq_id, slots_num;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to allocate twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(trans_pcie->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(trans, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
+ if (!trans_pcie->txq) {
+ IWL_ERR(trans, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ iwl_pcie_tx_free(trans);
+
+ return ret;
+}
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+
+ if (!trans_pcie->txq) {
+ ret = iwl_pcie_tx_alloc(trans);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(trans, SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ slots_num = (txq_id == trans_pcie->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
+ slots_num, txq_id);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+	/* Upon error, free only if we allocated something */
+ if (alloc)
+ iwl_pcie_tx_free(trans);
+ return ret;
+}
+
+static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq)
+{
+ if (!trans_pcie->wd_timeout)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->q.read_ptr == txq->q.write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ /* n_bd is usually 256 => n_bd - 1 = 0xff */
+ int tfd_num = ssn & (txq->q.n_bd - 1);
+ struct iwl_queue *q = &txq->q;
+ int last_to_free;
+
+	/* This function is not meant to release cmd queue */
+ if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+ return;
+
+ spin_lock(&txq->lock);
+
+ if (txq->q.read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+ txq_id, txq->q.read_ptr, tfd_num, ssn);
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. That entry must still be marked as used */
+ last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+
+ if (!iwl_queue_used(q, last_to_free)) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ q->read_ptr != tfd_num;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
+ continue;
+
+ __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+
+ txq->entries[txq->q.read_ptr].skb = NULL;
+
+ iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+
+ iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+ }
+
+ iwl_pcie_txq_progress(trans_pcie, txq);
+
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
+out:
+ spin_unlock(&txq->lock);
}
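
A short worked example of the index math in iwl_trans_pcie_reclaim() above, assuming the usual n_bd of 256 (illustrative only):

	/* illustrative only: mapping a WiFi ssn onto the 256-entry ring */
	static int example_reclaim_index(int ssn)
	{
		int n_bd = 256;
		int tfd_num = ssn & (n_bd - 1);	/* e.g. ssn 300 -> tfd_num 44 */

		/* with read_ptr == 40, entries 40..43 are handed back in
		 * skbs; entry 44 itself is _not_ freed, so last_to_free,
		 * the entry checked with iwl_queue_used(), is 43 */
		return tfd_num;
	}
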
-static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ int nfreed = 0;
+
+ lockdep_assert_held(&txq->lock);
+
+ if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, q->write_ptr, q->read_ptr);
+ iwl_op_mode_nic_error(trans->op_mode);
+ }
+ }
+
+ iwl_pcie_txq_progress(trans_pcie, txq);
+}
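
A brief trace of the loop above (a sketch of the two interesting cases, not driver code):

	/*
	 * Normal case: read_ptr == 7, idx == 7 (the completed command).
	 *   idx becomes 8, the loop body runs once (nfreed 0 -> 1, no
	 *   message) and read_ptr advances to 8.
	 *
	 * Abnormal case: read_ptr == 6, idx == 7.
	 *   The command in slot 6 never completed; the loop runs twice,
	 *   the second pass sees nfreed > 0, prints "HCMD skipped" and
	 *   calls iwl_op_mode_nic_error().
	 */
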
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
u16 txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -405,7 +1014,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
-static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
+ u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -424,7 +1034,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
/* Stop this Tx queue before configuring it */
- iwl_txq_set_inactive(trans, txq_id);
+ iwl_pcie_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD queue */
if (txq_id != trans_pcie->cmd_queue)
@@ -435,7 +1045,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
u16 ra_tid = BUILD_RAxTID(sta_id, tid);
/* Map receiver-address / traffic-ID to this queue */
- iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
+ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
/* enable aggregations for the queue */
iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
@@ -480,20 +1090,29 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 stts_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+ static const u32 zero_val[4] = {};
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- iwl_txq_set_inactive(trans, txq_id);
+ iwl_pcie_txq_set_inactive(trans, txq_id);
+
+ _iwl_write_targ_mem_dwords(trans, stts_addr,
+ zero_val, ARRAY_SIZE(zero_val));
+
+ iwl_pcie_txq_unmap(trans, txq_id);
+
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-/**
- * iwl_enqueue_hcmd - enqueue a uCode command
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
*
@@ -501,15 +1120,17 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 * failed. On success, it returns the index (> 0) of the command in the
* command queue.
*/
-static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ void *dup_buf = NULL;
dma_addr_t phys_addr;
- u32 idx;
+ int idx;
u16 copy_size, cmd_size;
bool had_nocopy = false;
int i;
@@ -526,10 +1147,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
+ if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+ } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+ /*
+ * This is also a chunk that isn't copied
+ * to the static buffer so set had_nocopy.
+ */
+ had_nocopy = true;
+
+ /* only allowed once */
+ if (WARN_ON(dup_buf)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
+
+ dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+ GFP_ATOMIC);
+ if (!dup_buf)
+ return -ENOMEM;
} else {
/* NOCOPY must not be followed by normal! */
- if (WARN_ON(had_nocopy))
- return -EINVAL;
+ if (WARN_ON(had_nocopy)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
copy_size += cmd->len[i];
}
cmd_size += cmd->len[i];
@@ -541,8 +1185,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
* allocated into separate TFDs, then we will need to
* increase the size of the buffers.
*/
- if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
- return -EINVAL;
+ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+ "Command %s (%#x) is too large (%d bytes)\n",
+ get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
+ idx = -EINVAL;
+ goto free_dup_buf;
+ }
spin_lock_bh(&txq->lock);
@@ -551,7 +1199,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_ERR(trans, "No space in command queue\n");
iwl_op_mode_cmd_queue_full(trans->op_mode);
- return -ENOSPC;
+ idx = -ENOSPC;
+ goto free_dup_buf;
}
idx = get_cmd_index(q, q->write_ptr);
@@ -575,7 +1224,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+ if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP))
break;
memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
cmd_pos += cmd->len[i];
@@ -600,7 +1250,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_HC(trans,
"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+ get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
@@ -614,28 +1264,35 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, copy_size);
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+ const void *data = cmd->data[i];
+
if (!cmd->len[i])
continue;
- if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+ IWL_HCMD_DFL_DUP)))
continue;
- phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+ data = dup_buf;
+ phys_addr = dma_map_single(trans->dev, (void *)data,
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_unmap_tfd(trans, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
+ iwl_pcie_tfd_unmap(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
- iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
- cmd->len[i], 0);
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
}
out_meta->flags = cmd->flags;
+ if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+ kfree(txq->entries[idx].free_buf);
+ txq->entries[idx].free_buf = dup_buf;
txq->need_update = 1;
@@ -648,70 +1305,18 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(trans, txq);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
out:
spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+ if (idx < 0)
+ kfree(dup_buf);
return idx;
}
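
As a usage sketch of the dataflags handling above: chunks default to being copied into the command buffer, IWL_HCMD_DFL_NOCOPY chunks are DMA-mapped in place, and an IWL_HCMD_DFL_DUP chunk (at most one) is kmemdup()'ed and later freed via free_buf. The command ID, payload names and lengths below are hypothetical; only the flag usage mirrors the code above.

	/* hypothetical caller: one small header copied inline, one large
	 * table duplicated because its backing memory may be freed before
	 * the firmware has read it */
	struct iwl_host_cmd cmd = {
		.id = REPLY_EXAMPLE,			/* hypothetical ID   */
		.data = { &hdr_part, big_table },	/* hypothetical bufs */
		.len = { sizeof(hdr_part), table_len },
		.dataflags = { 0,			/* copied inline     */
			       IWL_HCMD_DFL_DUP },	/* kmemdup()'ed      */
	};

	ret = iwl_trans_send_cmd(trans, &cmd);
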
-static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
- struct iwl_tx_queue *txq)
-{
- if (!trans_pcie->wd_timeout)
- return;
-
- /*
- * if empty delete timer, otherwise move timer forward
- * since we're making progress on this queue
- */
- if (txq->q.read_ptr == txq->q.write_ptr)
- del_timer(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
-}
-
-/**
- * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
- int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- lockdep_assert_held(&txq->lock);
-
- if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx, q->n_bd,
- q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
- idx, q->write_ptr, q->read_ptr);
- iwl_op_mode_nic_error(trans->op_mode);
- }
-
- }
-
- iwl_queue_progress(trans_pcie, txq);
-}
-
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers)
@@ -720,8 +1325,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
- int handler_status)
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+ struct iwl_rx_cmd_buffer *rxb, int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -731,7 +1336,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+ struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
@@ -751,7 +1356,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
- iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+ iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -763,20 +1368,18 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
meta->source->handler_status = handler_status;
}
- iwl_hcmd_queue_reclaim(trans, txq_id, index);
+ iwl_pcie_cmdq_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->hdr.cmd));
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
}
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->hdr.cmd));
- wake_up(&trans->wait_command_queue);
+ get_cmd_string(trans_pcie, cmd->hdr.cmd));
+ wake_up(&trans_pcie->wait_command_queue);
}
meta->flags = 0;
@@ -786,7 +1389,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -795,59 +1399,59 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;
-
- ret = iwl_enqueue_hcmd(trans, cmd);
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
if (ret < 0) {
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+ get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
return 0;
}
-static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status))) {
IWL_ERR(trans, "Command %s: a command is already active!\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
return -EIO;
}
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
- cmd_idx = iwl_enqueue_hcmd(trans, cmd);
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_ERR(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+ get_cmd_string(trans_pcie, cmd->id), ret);
return ret;
}
- ret = wait_event_timeout(trans->wait_command_queue,
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE,
&trans_pcie->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
- struct iwl_tx_queue *txq =
+ struct iwl_txq *txq =
&trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
IWL_ERR(trans,
"Error sending %s: time out after %dms.\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+ get_cmd_string(trans_pcie, cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans,
@@ -857,16 +1461,28 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
IWL_DEBUG_INFO(trans,
"Clearing HCMD_ACTIVE for command %s\n",
- trans_pcie_get_cmd_string(trans_pcie,
- cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
}
+ if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+ get_cmd_string(trans_pcie, cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
- trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+ get_cmd_string(trans_pcie, cmd->id));
ret = -EIO;
goto cancel;
}
@@ -893,64 +1509,183 @@ cancel:
return ret;
}
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
+ return -EIO;
+
+ if (test_bit(STATUS_RFKILL, &trans_pcie->status))
+ return -ERFKILL;
+
if (cmd->flags & CMD_ASYNC)
- return iwl_send_cmd_async(trans, cmd);
+ return iwl_pcie_send_hcmd_async(trans, cmd);
- return iwl_send_cmd_sync(trans, cmd);
+	/* We can still fail on RFKILL, which can be asserted while we wait */
+ return iwl_pcie_send_hcmd_sync(trans, cmd);
}
-/* Frees buffers until index _not_ inclusive */
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
- struct sk_buff_head *skbs)
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int last_to_free;
- int freed = 0;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq;
+ struct iwl_queue *q;
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u8 wait_write_ptr = 0;
+ __le16 fc = hdr->frame_control;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
+
+ txq = &trans_pcie->txq[txq_id];
+ q = &txq->q;
- /* This function is not meant to release cmd queue*/
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
- return 0;
+ if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
- lockdep_assert_held(&txq->lock);
+ spin_lock(&txq->lock);
- /*Since we free until index _not_ inclusive, the one before index is
- * the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirements to help the SCD to parse
+	 * sequence number. This is a HW requirement that helps the SCD to parse
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
+ ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
+ /* Set up driver data for this TFD */
+ txq->entries[q->write_ptr].skb = skb;
+ txq->entries[q->write_ptr].cmd = dev_cmd;
+
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[q->write_ptr].meta;
- if ((index >= q->n_bd) ||
- (iwl_queue_used(q, last_to_free) == 0)) {
- IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
- q->write_ptr, q->read_ptr);
- return 0;
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(trans->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
+ goto out_err;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
}
- if (WARN_ON(!skb_queue_empty(skbs)))
- return 0;
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
+ dma_unmap_single(trans->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ goto out_err;
+ }
+ }
- for (;
- q->read_ptr != index;
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+ /* Attach buffers to TFD */
+ iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
- if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
- continue;
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
- __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- txq->entries[txq->q.read_ptr].skb = NULL;
+ IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
- iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
- freed++;
- }
+ dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(trans->dev, skb,
+ &txq->tfds[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+ trace_iwlwifi_dev_tx_data(trans->dev, skb,
+ skb->data + hdr_len, secondlen);
- iwl_queue_progress(trans_pcie, txq);
+ /* start timer if queue currently empty */
+ if (txq->need_update && q->read_ptr == q->write_ptr &&
+ trans_pcie->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
- return freed;
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+	 * regardless of what the check below does: it only decides
+	 * whether to update the write pointer again or stop the queue.
+ */
+ if (iwl_queue_space(q) < q->high_mark) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ } else {
+ iwl_stop_queue(trans, txq);
+ }
+ }
+ spin_unlock(&txq->lock);
+ return 0;
+out_err:
+ spin_unlock(&txq->lock);
+ return -1;
}
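
A worked example of the dword-alignment rule in iwl_trans_pcie_tx() above (illustrative only; the concrete length is made up):

	/* illustrative only: round the tx-cmd + MAC-header chunk up to a
	 * dword, as done for firstlen above */
	static u16 example_first_tb_len(u16 len, bool *mh_padded)
	{
		u16 firstlen = (len + 3) & ~3;	/* e.g. 158 -> 160 */

		/* when padding was added, the device must be told about the
		 * extra bytes via TX_CMD_FLG_MH_PAD_MSK */
		*mh_padded = (firstlen != len);
		return firstlen;
	}
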