author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 01:27:06 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-12 01:27:06 +0300
commit     70e71ca0af244f48a5dcf56dc435243792e3a495
tree       f7d9c4c4d9a857a00043e9bf6aa2d6f533a34778 /drivers/net/wireless/ath/ath10k
parent     bae41e45b7400496b9bf0c70c6004419d9987819
parent     00c83b01d58068dfeb2e1351cca6fccf2a83fa8f
download   linux-70e71ca0af244f48a5dcf56dc435243792e3a495.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) New offloading infrastructure and example 'rocker' driver for
offloading of switching and routing to hardware.
This work was done by a large group of dedicated individuals, not
limited to: Scott Feldman, Jiri Pirko, Thomas Graf, John Fastabend,
Jamal Hadi Salim, Andy Gospodarek, Florian Fainelli, Roopa Prabhu
2) Start making the networking stack operate on IOV iterators instead of
modifying iov objects in-situ during transfers. Thanks to Al Viro
and Herbert Xu.
3) A set of new netlink interfaces for the TIPC stack, from Richard
Alpe.
4) Remove unnecessary looping during ipv6 routing lookups, from Martin
KaFai Lau.
5) Add PAUSE frame generation support to gianfar driver, from Matei
Pavaluca.
6) Allow for larger reordering levels in TCP, which are easily
achievable in the real world right now, from Eric Dumazet.
7) Add a variant of napi_schedule() that doesn't need to disable cpu
interrupts, from Eric Dumazet. (A usage sketch follows this list.)
8) Use a doubly linked list to optimize neigh_parms_release(), from
Nicolas Dichtel.
9) Various enhancements to the kernel BPF verifier, and allow eBPF
programs to actually be attached to sockets. From Alexei
Starovoitov. (A minimal attach sketch follows this list.)
10) Support TSO/LSO in sunvnet driver, from David L Stevens.
11) Allow controlling ECN usage via routing metrics, from Florian
Westphal.
12) Remote checksum offload, from Tom Herbert.
13) Add split-header receive, BQL, and xmit_more support to amd-xgbe
driver, from Thomas Lendacky.
14) Add MPLS support to openvswitch, from Simon Horman.
15) Support wildcard tunnel endpoints in ipv6 tunnels, from Steffen
Klassert.
16) Do GRO flushes on a per-device basis using a timer, from Eric
Dumazet. This tries to reconcile the conflicting handling goals of
bulk vs. RPC-like traffic.
17) Allow userspace to ask for the CPU on which a packet was
received/steered, via SO_INCOMING_CPU. From Eric Dumazet. (See the
getsockopt() sketch after this list.)
18) Limit GSO packets to half the current congestion window, from Eric
Dumazet.
19) Add a generic helper so that all drivers set their RSS keys in a
consistent way, from Eric Dumazet. (See the sketch after this list.)
20) Add xmit_more support to enic driver, from Govindarajulu
Varadarajan.
21) Add VLAN packet scheduler action, from Jiri Pirko.
22) Support configurable RSS hash functions via ethtool, from Eyal
Perry.
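
For item 7 above, the variant referred to is presumably napi_schedule_irqoff(),
which may be called from contexts that already run with local interrupts
disabled. A minimal driver-side sketch, assuming a hypothetical mydrv_*
per-queue structure:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* Hypothetical per-RX-queue private data for this sketch. */
    struct mydrv_queue {
            struct napi_struct napi;
    };

    static irqreturn_t mydrv_msix_handler(int irq, void *data)
    {
            struct mydrv_queue *q = data;

            /*
             * MSI-X handlers run in hard-IRQ context with local interrupts
             * already disabled, so the _irqoff variant avoids the redundant
             * local_irq_save()/local_irq_restore() done by napi_schedule().
             */
            napi_schedule_irqoff(&q->napi);

            return IRQ_HANDLED;
    }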
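For item 9 above, the socket side is the new SO_ATTACH_BPF socket option,
which takes the file descriptor of an eBPF program already loaded through the
bpf() syscall. A hedged userspace sketch; loading the program (and hence
obtaining prog_fd) is assumed and not shown:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_ATTACH_BPF
    #define SO_ATTACH_BPF 50        /* guard for libc headers predating this option */
    #endif

    /* prog_fd is assumed to come from a prior bpf(BPF_PROG_LOAD, ...) call. */
    static int attach_ebpf_filter(int sock, int prog_fd)
    {
            if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
                           &prog_fd, sizeof(prog_fd)) < 0) {
                    perror("setsockopt(SO_ATTACH_BPF)");
                    return -1;
            }
            return 0;
    }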
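For item 17 above, a short userspace sketch of querying the new option with
getsockopt(); the #define is a fallback in case the libc headers in use don't
carry the constant yet:

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_INCOMING_CPU
    #define SO_INCOMING_CPU 49      /* guard for libc headers predating this option */
    #endif

    /* Report which CPU last processed packets for this socket, e.g. so the
     * consuming thread can be pinned to the same CPU. */
    static void print_incoming_cpu(int sock)
    {
            int cpu;
            socklen_t len = sizeof(cpu);

            if (getsockopt(sock, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) == 0)
                    printf("socket last handled on CPU %d\n", cpu);
            else
                    perror("getsockopt(SO_INCOMING_CPU)");
    }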
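For item 19 above, the helper appears to be netdev_rss_key_fill(), which hands
out bytes of a single boot-time random key so every driver and port hashes
flows consistently. A minimal sketch with a hypothetical mydrv_* wrapper:

    #include <linux/netdevice.h>

    /* 40 bytes is the Toeplitz key length commonly used by 10G NICs;
     * the exact size is device specific. */
    #define MYDRV_RSS_KEY_SIZE 40

    static void mydrv_init_rss_key(u8 *key)
    {
            /* Copy from the system-wide random RSS key instead of each
             * driver generating its own, so all ports hash the same way. */
            netdev_rss_key_fill(key, MYDRV_RSS_KEY_SIZE);
    }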
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1820 commits)
Fix race condition between vxlan_sock_add and vxlan_sock_release
net/macb: fix compilation warning for print_hex_dump() called with skb->mac_header
net/mlx4: Add support for A0 steering
net/mlx4: Refactor QUERY_PORT
net/mlx4_core: Add explicit error message when rule doesn't meet configuration
net/mlx4: Add A0 hybrid steering
net/mlx4: Add mlx4_bitmap zone allocator
net/mlx4: Add a check if there are too many reserved QPs
net/mlx4: Change QP allocation scheme
net/mlx4_core: Use tasklet for user-space CQ completion events
net/mlx4_core: Mask out host side virtualization features for guests
net/mlx4_en: Set csum level for encapsulated packets
be2net: Export tunnel offloads only when a VxLAN tunnel is created
gianfar: Fix dma check map error when DMA_API_DEBUG is enabled
cxgb4/csiostor: Don't use MASTER_MUST for fw_hello call
net: fec: only enable mdio interrupt before phy device link up
net: fec: clear all interrupt events to support i.MX6SX
net: fec: reset fep link status in suspend function
net: sock: fix access via invalid file descriptor
net: introduce helper macro for_each_cmsghdr
...
Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
21 files changed, 3807 insertions, 2298 deletions
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 101cadb6e4ba..a156e6e48708 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -443,12 +443,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) * Guts of ath10k_ce_completed_recv_next. * The caller takes responsibility for any necessary locking. */ -static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp, - unsigned int *flagsp) +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp) { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; @@ -558,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, /* sanity */ dest_ring->per_transfer_context[sw_index] = NULL; + desc->nbytes = 0; /* Update sw_index */ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); @@ -576,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. */ -static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp, - u32 *bufferp, - unsigned int *nbytesp, - unsigned int *transfer_idp) +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp) { struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 ctrl_addr = ce_state->ctrl_addr; @@ -817,7 +818,10 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ce_id; - for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + /* Skip the last copy engine, CE7 the diagnostic window, as that + * uses polling and isn't initialized for interrupts. + */ + for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); } @@ -832,8 +836,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->src_nentries); - memset(src_ring->per_transfer_context, 0, - nentries * sizeof(*src_ring->per_transfer_context)); + memset(src_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); src_ring->sw_index &= src_ring->nentries_mask; @@ -869,8 +873,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, nentries = roundup_pow_of_two(attr->dest_nentries); - memset(dest_ring->per_transfer_context, 0, - nentries * sizeof(*dest_ring->per_transfer_context)); + memset(dest_ring->base_addr_owner_space, 0, + nentries * sizeof(struct ce_desc)); dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr); dest_ring->sw_index &= dest_ring->nentries_mask; @@ -1020,37 +1024,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * initialized by software/firmware. 
*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)) + const struct ce_attr *attr) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; int ret; - /* - * Make sure there's enough CE ringbuffer entries for HTT TX to avoid - * additional TX locking checks. - * - * For the lack of a better place do the check here. - */ - BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > - (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - - spin_lock_bh(&ar_pci->ce_lock); - ce_state->ar = ar; - ce_state->id = ce_id; - ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); - ce_state->attr_flags = attr->flags; - ce_state->src_sz_max = attr->src_sz_max; - if (attr->src_nentries) - ce_state->send_cb = send_cb; - if (attr->dest_nentries) - ce_state->recv_cb = recv_cb; - spin_unlock_bh(&ar_pci->ce_lock); - if (attr->src_nentries) { ret = ath10k_ce_init_src_ring(ar, ce_id, attr); if (ret) { @@ -1098,12 +1075,37 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) } int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr) + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; int ret; + /* + * Make sure there's enough CE ringbuffer entries for HTT TX to avoid + * additional TX locking checks. + * + * For the lack of a better place do the check here. + */ + BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > + (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); + + ce_state->ar = ar; + ce_state->id = ce_id; + ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); + ce_state->attr_flags = attr->flags; + ce_state->src_sz_max = attr->src_sz_max; + + if (attr->src_nentries) + ce_state->send_cb = send_cb; + + if (attr->dest_nentries) + ce_state->recv_cb = recv_cb; + if (attr->src_nentries) { ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); if (IS_ERR(ce_state->src_ring)) { diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 329b7340fa72..617a151e8ce4 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -192,15 +192,21 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, unsigned int *nbytesp, unsigned int *transfer_idp); +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp); + /*==================CE Engine Initialization=======================*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr, - void (*send_cb)(struct ath10k_ce_pipe *), - void (*recv_cb)(struct ath10k_ce_pipe *)); + const struct ce_attr *attr); void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, - const struct ce_attr *attr); + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)); void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id); /*==================CE Engine Shutdown=======================*/ @@ -213,6 +219,13 @@ int 
ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, u32 *bufferp); +int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp, + u32 *bufferp, + unsigned int *nbytesp, + unsigned int *transfer_idp, + unsigned int *flagsp); + /* * Support clean shutdown by allowing the caller to cancel * pending sends. Target DMA must be stopped before using diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index cee18c89d7f2..7762061a1944 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -31,12 +31,17 @@ unsigned int ath10k_debug_mask; static bool uart_print; static unsigned int ath10k_p2p; +static bool skip_otp; + module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); module_param(uart_print, bool, 0644); module_param_named(p2p, ath10k_p2p, uint, 0644); +module_param(skip_otp, bool, 0644); + MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(p2p, "Enable ath10k P2P support"); +MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -138,7 +143,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, return fw; } -static int ath10k_push_board_ext_data(struct ath10k *ar) +static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data, + size_t data_len) { u32 board_data_size = QCA988X_BOARD_DATA_SZ; u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ; @@ -159,14 +165,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) if (board_ext_data_addr == 0) return 0; - if (ar->board_len != (board_data_size + board_ext_data_size)) { + if (data_len != (board_data_size + board_ext_data_size)) { ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", - ar->board_len, board_data_size, board_ext_data_size); + data_len, board_data_size, board_ext_data_size); return -EINVAL; } ret = ath10k_bmi_write_memory(ar, board_ext_data_addr, - ar->board_data + board_data_size, + data + board_data_size, board_ext_data_size); if (ret) { ath10k_err(ar, "could not write board ext data (%d)\n", ret); @@ -184,13 +190,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) return 0; } -static int ath10k_download_board_data(struct ath10k *ar) +static int ath10k_download_board_data(struct ath10k *ar, const void *data, + size_t data_len) { u32 board_data_size = QCA988X_BOARD_DATA_SZ; u32 address; int ret; - ret = ath10k_push_board_ext_data(ar); + ret = ath10k_push_board_ext_data(ar, data, data_len); if (ret) { ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; @@ -202,9 +209,9 @@ static int ath10k_download_board_data(struct ath10k *ar) goto exit; } - ret = ath10k_bmi_write_memory(ar, address, ar->board_data, + ret = ath10k_bmi_write_memory(ar, address, data, min_t(u32, board_data_size, - ar->board_len)); + data_len)); if (ret) { ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; @@ -220,11 +227,39 @@ exit: return ret; } +static int ath10k_download_cal_file(struct ath10k *ar) +{ + int ret; + + if (!ar->cal_file) + return -ENOENT; + + if (IS_ERR(ar->cal_file)) + return PTR_ERR(ar->cal_file); + + ret = ath10k_download_board_data(ar, ar->cal_file->data, + ar->cal_file->size); + if (ret) { + ath10k_err(ar, "failed to download cal_file data: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal 
file downloaded\n"); + + return 0; +} + static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; int ret; + ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); + if (ret) { + ath10k_err(ar, "failed to download board data: %d\n", ret); + return ret; + } + /* OTP is optional */ if (!ar->otp_data || !ar->otp_len) { @@ -250,7 +285,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); - if (result != 0) { + if (!skip_otp && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } @@ -308,6 +343,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) if (ar->firmware && !IS_ERR(ar->firmware)) release_firmware(ar->firmware); + if (ar->cal_file && !IS_ERR(ar->cal_file)) + release_firmware(ar->cal_file); + ar->board = NULL; ar->board_data = NULL; ar->board_len = 0; @@ -319,6 +357,27 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ar->firmware = NULL; ar->firmware_data = NULL; ar->firmware_len = 0; + + ar->cal_file = NULL; +} + +static int ath10k_fetch_cal_file(struct ath10k *ar) +{ + char filename[100]; + + /* cal-<bus>-<id>.bin */ + scnprintf(filename, sizeof(filename), "cal-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + + ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename); + if (IS_ERR(ar->cal_file)) + /* calibration file is optional, don't print any warnings */ + return PTR_ERR(ar->cal_file); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n", + ATH10K_FW_DIR, filename); + + return 0; } static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) @@ -562,6 +621,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret; + /* calibration file is optional, don't check for any errors */ + ath10k_fetch_cal_file(ar); + ar->fw_api = 3; ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); @@ -589,30 +651,32 @@ success: return 0; } -static int ath10k_init_download_firmware(struct ath10k *ar, - enum ath10k_firmware_mode mode) +static int ath10k_download_cal_data(struct ath10k *ar) { int ret; - ret = ath10k_download_board_data(ar); - if (ret) { - ath10k_err(ar, "failed to download board data: %d\n", ret); - return ret; + ret = ath10k_download_cal_file(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_FILE; + goto done; } + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find a calibration file, try OTP next: %d\n", + ret); + ret = ath10k_download_and_run_otp(ar); if (ret) { ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } - ret = ath10k_download_fw(ar, mode); - if (ret) { - ath10k_err(ar, "failed to download firmware: %d\n", ret); - return ret; - } + ar->cal_mode = ATH10K_CAL_MODE_OTP; - return ret; +done: + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n", + ath10k_cal_mode_str(ar->cal_mode)); + return 0; } static int ath10k_init_uart(struct ath10k *ar) @@ -685,6 +749,25 @@ static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); + set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + + /* Place a barrier to make sure the compiler doesn't reorder + * CRASH_FLUSH and calling other functions. 
+ */ + barrier(); + + ieee80211_stop_queues(ar->hw); + ath10k_drain_tx(ar); + complete_all(&ar->scan.started); + complete_all(&ar->scan.completed); + complete_all(&ar->scan.on_channel); + complete_all(&ar->offchan_tx_completed); + complete_all(&ar->install_key_done); + complete_all(&ar->vdev_setup_done); + wake_up(&ar->htt.empty_tx_wq); + wake_up(&ar->wmi.tx_credits_wq); + wake_up(&ar->peer_mapping_wq); + mutex_lock(&ar->conf_mutex); switch (ar->state) { @@ -716,12 +799,25 @@ static void ath10k_core_restart(struct work_struct *work) mutex_unlock(&ar->conf_mutex); } +static void ath10k_core_init_max_sta_count(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ar->max_num_peers = TARGET_10X_NUM_PEERS; + ar->max_num_stations = TARGET_10X_NUM_STATIONS; + } else { + ar->max_num_peers = TARGET_NUM_PEERS; + ar->max_num_stations = TARGET_NUM_STATIONS; + } +} + int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) { int status; lockdep_assert_held(&ar->conf_mutex); + clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); + ath10k_bmi_start(ar); if (ath10k_init_configure_target(ar)) { @@ -729,7 +825,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err; } - status = ath10k_init_download_firmware(ar, mode); + status = ath10k_download_cal_data(ar); + if (status) + goto err; + + status = ath10k_download_fw(ar, mode); if (status) goto err; @@ -846,9 +946,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) goto err_hif_stop; if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; + ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1; else - ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1; INIT_LIST_HEAD(&ar->arvifs); @@ -946,6 +1046,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } + ath10k_core_init_max_sta_count(ar); + mutex_lock(&ar->conf_mutex); ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); @@ -1084,6 +1186,7 @@ void ath10k_core_unregister(struct ath10k *ar) EXPORT_SYMBOL(ath10k_core_unregister); struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, + enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; @@ -1100,6 +1203,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->dev = dev; ar->hif.ops = hif_ops; + ar->hif.bus = bus; init_completion(&ar->scan.started); init_completion(&ar->scan.completed); @@ -1120,6 +1224,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq); + init_waitqueue_head(&ar->htt.empty_tx_wq); + init_waitqueue_head(&ar->wmi.tx_credits_wq); init_completion(&ar->offchan_tx_completed); INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index fe531ea6926c..514c219263a5 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -63,12 +63,28 @@ struct ath10k; +enum ath10k_bus { + ATH10K_BUS_PCI, +}; + +static inline const char *ath10k_bus_str(enum ath10k_bus bus) +{ + switch (bus) { + case ATH10K_BUS_PCI: + return "pci"; + } + + return "unknown"; +} + struct ath10k_skb_cb { dma_addr_t paddr; + u8 eid; u8 vdev_id; struct { u8 tid; + u16 freq; bool is_offchan; struct ath10k_htt_txbuf *txbuf; u32 txbuf_paddr; @@ -96,8 +112,6 @@ struct 
ath10k_bmi { bool done_sent; }; -#define ATH10K_MAX_MEM_REQS 16 - struct ath10k_mem_chunk { void *vaddr; dma_addr_t paddr; @@ -110,22 +124,27 @@ struct ath10k_wmi { struct completion service_ready; struct completion unified_ready; wait_queue_head_t tx_credits_wq; + DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); struct wmi_cmd_map *cmd; struct wmi_vdev_param_map *vdev_param; struct wmi_pdev_param_map *pdev_param; u32 num_mem_chunks; - struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS]; + struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; }; -struct ath10k_peer_stat { +struct ath10k_fw_stats_peer { + struct list_head list; + u8 peer_macaddr[ETH_ALEN]; u32 peer_rssi; u32 peer_tx_rate; u32 peer_rx_rate; /* 10x only */ }; -struct ath10k_target_stats { +struct ath10k_fw_stats_pdev { + struct list_head list; + /* PDEV stats */ s32 ch_noise_floor; u32 tx_frame_count; @@ -180,15 +199,11 @@ struct ath10k_target_stats { s32 phy_errs; s32 phy_err_drop; s32 mpdu_errs; +}; - /* VDEV STATS */ - - /* PEER STATS */ - u8 peers; - struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS]; - - /* TODO: Beacon filter stats */ - +struct ath10k_fw_stats { + struct list_head pdevs; + struct list_head peers; }; struct ath10k_dfs_stats { @@ -206,6 +221,8 @@ struct ath10k_peer { int vdev_id; u8 addr[ETH_ALEN]; DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS); + + /* protected by ar->data_lock */ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; }; @@ -234,6 +251,8 @@ struct ath10k_vif { struct sk_buff *beacon; /* protected by data_lock */ bool beacon_sent; + void *beacon_buf; + dma_addr_t beacon_paddr; struct ath10k *ar; struct ieee80211_vif *vif; @@ -273,6 +292,7 @@ struct ath10k_vif { u8 force_sgi; bool use_cts_prot; int num_legacy_stations; + int txpower; }; struct ath10k_vif_iter { @@ -292,17 +312,19 @@ struct ath10k_fw_crash_data { struct ath10k_debug { struct dentry *debugfs_phy; - struct ath10k_target_stats target_stats; - DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX); - - struct completion event_stats_compl; + struct ath10k_fw_stats fw_stats; + struct completion fw_stats_complete; + bool fw_stats_done; unsigned long htt_stats_mask; struct delayed_work htt_stats_dwork; struct ath10k_dfs_stats dfs_stats; struct ath_dfs_pool_stats dfs_pool_stats; + /* protected by conf_mutex */ u32 fw_dbglog_mask; + u32 pktlog_filter; + u32 reg_addr; u8 htt_max_amsdu; u8 htt_max_ampdu; @@ -321,7 +343,7 @@ enum ath10k_state { * stopped in ath10k_core_restart() work holding conf_mutex. The state * RESTARTED means that the device is up and mac80211 has started hw * reconfiguration. Once mac80211 is done with the reconfiguration we - * set the state to STATE_ON in restart_complete(). */ + * set the state to STATE_ON in reconfig_complete(). */ ATH10K_STATE_RESTARTING, ATH10K_STATE_RESTARTED, @@ -369,8 +391,30 @@ enum ath10k_dev_flags { /* Indicates that ath10k device is during CAC phase of DFS */ ATH10K_CAC_RUNNING, ATH10K_FLAG_CORE_REGISTERED, + + /* Device has crashed and needs to restart. This indicates any pending + * waiters should immediately cancel instead of waiting for a time out. 
+ */ + ATH10K_FLAG_CRASH_FLUSH, }; +enum ath10k_cal_mode { + ATH10K_CAL_MODE_FILE, + ATH10K_CAL_MODE_OTP, +}; + +static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) +{ + switch (mode) { + case ATH10K_CAL_MODE_FILE: + return "file"; + case ATH10K_CAL_MODE_OTP: + return "otp"; + } + + return "unknown"; +} + enum ath10k_scan_state { ATH10K_SCAN_IDLE, ATH10K_SCAN_STARTING, @@ -421,6 +465,7 @@ struct ath10k { bool p2p; struct { + enum ath10k_bus bus; const struct ath10k_hif_ops *ops; } hif; @@ -456,7 +501,10 @@ struct ath10k { const void *firmware_data; size_t firmware_len; + const struct firmware *cal_file; + int fw_api; + enum ath10k_cal_mode cal_mode; struct { struct completion started; @@ -482,7 +530,7 @@ struct ath10k { /* current operating channel definition */ struct cfg80211_chan_def chandef; - int free_vdev_map; + unsigned long long free_vdev_map; bool monitor; int monitor_vdev_id; bool monitor_started; @@ -517,8 +565,12 @@ struct ath10k { struct list_head peers; wait_queue_head_t peer_mapping_wq; - /* number of created peers; protected by data_lock */ + /* protected by conf_mutex */ int num_peers; + int num_stations; + + int max_num_peers; + int max_num_stations; struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; @@ -563,11 +615,19 @@ struct ath10k { bool utf_monitor; } testmode; + struct { + /* protected by data_lock */ + u32 fw_crash_counter; + u32 fw_warm_reset_counter; + u32 fw_cold_reset_counter; + } stats; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, + enum ath10k_bus bus, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3756feba3223..a716758f14b0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -17,12 +17,12 @@ #include <linux/module.h> #include <linux/debugfs.h> -#include <linux/version.h> -#include <linux/vermagic.h> #include <linux/vmalloc.h> +#include <linux/utsname.h> #include "core.h" #include "debug.h" +#include "hif.h" /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 @@ -106,34 +106,37 @@ struct ath10k_dump_file_data { u8 data[0]; } __packed; -int ath10k_info(struct ath10k *ar, const char *fmt, ...) +void ath10k_info(struct ath10k *ar, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = dev_info(ar->dev, "%pV", &vaf); + dev_info(ar->dev, "%pV", &vaf); trace_ath10k_log_info(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { - ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", + ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s max_sta %d\n", ar->hw_params.name, ar->target_version, ar->chip_id, ar->hw->wiphy->fw_version, ar->fw_api, ar->htt.target_version_major, - ar->htt.target_version_minor); + ar->htt.target_version_minor, + ar->fw_version_major, + ar->fw_version_minor, + ar->fw_version_release, + ar->fw_version_build, + ath10k_cal_mode_str(ar->cal_mode), + ar->max_num_stations); ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", config_enabled(CONFIG_ATH10K_DEBUG), config_enabled(CONFIG_ATH10K_DEBUGFS), @@ -143,25 +146,22 @@ void ath10k_print_driver_info(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_print_driver_info); -int ath10k_err(struct ath10k *ar, const char *fmt, ...) +void ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret; va_start(args, fmt); vaf.va = &args; - ret = dev_err(ar->dev, "%pV", &vaf); + dev_err(ar->dev, "%pV", &vaf); trace_ath10k_log_err(ar, &vaf); va_end(args); - - return ret; } EXPORT_SYMBOL(ath10k_err); -int ath10k_warn(struct ath10k *ar, const char *fmt, ...) +void ath10k_warn(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -174,20 +174,11 @@ int ath10k_warn(struct ath10k *ar, const char *fmt, ...) trace_ath10k_log_warn(ar, &vaf); va_end(args); - - return 0; } EXPORT_SYMBOL(ath10k_warn); #ifdef CONFIG_ATH10K_DEBUGFS -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) -{ - memcpy(ar->debug.wmi_service_bitmap, service_map, map_size); -} - static ssize_t ath10k_read_wmi_services(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -209,8 +200,9 @@ static ssize_t ath10k_read_wmi_services(struct file *file, if (len > buf_len) len = buf_len; + spin_lock_bh(&ar->data_lock); for (i = 0; i < WMI_SERVICE_MAX; i++) { - enabled = test_bit(i, ar->debug.wmi_service_bitmap); + enabled = test_bit(i, ar->wmi.svc_map); name = wmi_service_name(i); if (!name) { @@ -226,6 +218,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, "%-40s %s\n", name, enabled ? 
"enabled" : "-"); } + spin_unlock_bh(&ar->data_lock); ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -242,169 +235,182 @@ static const struct file_operations fops_wmi_services = { .llseek = default_llseek, }; -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) +static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) { - u8 *tmp = ev->data; - struct ath10k_target_stats *stats; - int num_pdev_stats, num_vdev_stats, num_peer_stats; - struct wmi_pdev_stats_10x *ps; - int i; + struct ath10k_fw_stats_pdev *i, *tmp; + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_debug_fw_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath10k_debug_fw_stats_reset(struct ath10k *ar) +{ spin_lock_bh(&ar->data_lock); + ar->debug.fw_stats_done = false; + ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); + spin_unlock_bh(&ar->data_lock); +} - stats = &ar->debug.target_stats; - - num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */ - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */ - num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */ - - if (num_pdev_stats) { - ps = (struct wmi_pdev_stats_10x *)tmp; - - stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf); - stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count); - stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count); - stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count); - stats->cycle_count = __le32_to_cpu(ps->cycle_count); - stats->phy_err_count = __le32_to_cpu(ps->phy_err_count); - stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr); - - stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued); - stats->comp_delivered = - __le32_to_cpu(ps->wal.tx.comp_delivered); - stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued); - stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued); - stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop); - stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued); - stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed); - stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued); - stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped); - stats->underrun = __le32_to_cpu(ps->wal.tx.underrun); - stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort); - stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed); - stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko); - stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc); - stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers); - stats->sw_retry_failure = - __le32_to_cpu(ps->wal.tx.sw_retry_failure); - stats->illgl_rate_phy_err = - __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err); - stats->pdev_cont_xretry = - __le32_to_cpu(ps->wal.tx.pdev_cont_xretry); - stats->pdev_tx_timeout = - __le32_to_cpu(ps->wal.tx.pdev_tx_timeout); - stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets); - stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun); - stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf); - - stats->mid_ppdu_route_change = - __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change); - stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd); - stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags); - stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags); - 
stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags); - stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags); - stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus); - stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus); - stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus); - stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus); - stats->oversize_amsdu = - __le32_to_cpu(ps->wal.rx.oversize_amsdu); - stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs); - stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop); - stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs); - - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, - ar->fw_features)) { - stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad); - stats->rts_bad = __le32_to_cpu(ps->rts_bad); - stats->rts_good = __le32_to_cpu(ps->rts_good); - stats->fcs_bad = __le32_to_cpu(ps->fcs_bad); - stats->no_beacons = __le32_to_cpu(ps->no_beacons); - stats->mib_int_count = __le32_to_cpu(ps->mib_int_count); - tmp += sizeof(struct wmi_pdev_stats_10x); - } else { - tmp += sizeof(struct wmi_pdev_stats_old); - } +static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head) +{ + struct ath10k_fw_stats_peer *i; + size_t num = 0; + + list_for_each_entry(i, head, list) + ++num; + + return num; +} + +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_fw_stats stats = {}; + bool is_start, is_started, is_end; + size_t num_peers; + int ret; + + INIT_LIST_HEAD(&stats.pdevs); + INIT_LIST_HEAD(&stats.peers); + + spin_lock_bh(&ar->data_lock); + ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); + if (ret) { + ath10k_warn(ar, "failed to pull fw stats: %d\n", ret); + goto unlock; } - /* 0 or max vdevs */ - /* Currently firmware does not support VDEV stats */ - if (num_vdev_stats) { - struct wmi_vdev_stats *vdev_stats; + /* Stat data may exceed htc-wmi buffer limit. In such case firmware + * splits the stats data and delivers it in a ping-pong fashion of + * request cmd-update event. + * + * However there is no explicit end-of-data. Instead start-of-data is + * used as an implicit one. 
This works as follows: + * a) discard stat update events until one with pdev stats is + * delivered - this skips session started at end of (b) + * b) consume stat update events until another one with pdev stats is + * delivered which is treated as end-of-data and is itself discarded + */ - for (i = 0; i < num_vdev_stats; i++) { - vdev_stats = (struct wmi_vdev_stats *)tmp; - tmp += sizeof(struct wmi_vdev_stats); - } + if (ar->debug.fw_stats_done) { + ath10k_warn(ar, "received unsolicited stats update event\n"); + goto free; } - if (num_peer_stats) { - struct wmi_peer_stats_10x *peer_stats; - struct ath10k_peer_stat *s; - - stats->peers = num_peer_stats; - - for (i = 0; i < num_peer_stats; i++) { - peer_stats = (struct wmi_peer_stats_10x *)tmp; - s = &stats->peer_stat[i]; - - memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr, - ETH_ALEN); - s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi); - s->peer_tx_rate = - __le32_to_cpu(peer_stats->peer_tx_rate); - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, - ar->fw_features)) { - s->peer_rx_rate = - __le32_to_cpu(peer_stats->peer_rx_rate); - tmp += sizeof(struct wmi_peer_stats_10x); - - } else { - tmp += sizeof(struct wmi_peer_stats_old); - } + num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers); + is_start = (list_empty(&ar->debug.fw_stats.pdevs) && + !list_empty(&stats.pdevs)); + is_end = (!list_empty(&ar->debug.fw_stats.pdevs) && + !list_empty(&stats.pdevs)); + + if (is_start) + list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); + + if (is_end) + ar->debug.fw_stats_done = true; + + is_started = !list_empty(&ar->debug.fw_stats.pdevs); + + if (is_started && !is_end) { + if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) { + /* Although this is unlikely impose a sane limit to + * prevent firmware from DoS-ing the host. + */ + ath10k_warn(ar, "dropping fw peer stats\n"); + goto free; } + + list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); } + complete(&ar->debug.fw_stats_complete); + +free: + /* In some cases lists have been spliced and cleared. Free up + * resources if that is not the case. 
+ */ + ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); + ath10k_debug_fw_stats_peers_free(&stats.peers); + +unlock: spin_unlock_bh(&ar->data_lock); - complete(&ar->debug.event_stats_compl); } -static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +static int ath10k_debug_fw_stats_request(struct ath10k *ar) { - struct ath10k *ar = file->private_data; - struct ath10k_target_stats *fw_stats; - char *buf = NULL; - unsigned int len = 0, buf_len = 8000; - ssize_t ret_cnt = 0; - long left; - int i; + unsigned long timeout; int ret; - fw_stats = &ar->debug.target_stats; + lockdep_assert_held(&ar->conf_mutex); - mutex_lock(&ar->conf_mutex); + timeout = jiffies + msecs_to_jiffies(1*HZ); - if (ar->state != ATH10K_STATE_ON) - goto exit; + ath10k_debug_fw_stats_reset(ar); - buf = kzalloc(buf_len, GFP_KERNEL); - if (!buf) - goto exit; + for (;;) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; - ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); - if (ret) { - ath10k_warn(ar, "could not request stats (%d)\n", ret); - goto exit; + reinit_completion(&ar->debug.fw_stats_complete); + + ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); + if (ret) { + ath10k_warn(ar, "could not request stats (%d)\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, + 1*HZ); + if (ret <= 0) + return -ETIMEDOUT; + + spin_lock_bh(&ar->data_lock); + if (ar->debug.fw_stats_done) { + spin_unlock_bh(&ar->data_lock); + break; + } + spin_unlock_bh(&ar->data_lock); } - left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ); - if (left <= 0) - goto exit; + return 0; +} + +/* FIXME: How to calculate the buffer size sanely? */ +#define ATH10K_FW_STATS_BUF_SIZE (1024*1024) + +static void ath10k_fw_stats_fill(struct ath10k *ar, + struct ath10k_fw_stats *fw_stats, + char *buf) +{ + unsigned int len = 0; + unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE; + const struct ath10k_fw_stats_pdev *pdev; + const struct ath10k_fw_stats_peer *peer; + size_t num_peers; spin_lock_bh(&ar->data_lock); + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath10k_fw_stats_pdev, list); + if (!pdev) { + ath10k_warn(ar, "failed to get pdev stats\n"); + goto unlock; + } + + num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers); + len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath10k PDEV stats"); @@ -412,29 +418,29 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Channel noise floor", fw_stats->ch_noise_floor); + "Channel noise floor", pdev->ch_noise_floor); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Channel TX power", fw_stats->chan_tx_power); + "Channel TX power", pdev->chan_tx_power); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "TX frame count", fw_stats->tx_frame_count); + "TX frame count", pdev->tx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX frame count", fw_stats->rx_frame_count); + "RX frame count", pdev->rx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RX clear count", fw_stats->rx_clear_count); + "RX clear count", pdev->rx_clear_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "Cycle count", fw_stats->cycle_count); + "Cycle count", pdev->cycle_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "PHY 
error count", fw_stats->phy_err_count); + "PHY error count", pdev->phy_err_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS bad count", fw_stats->rts_bad); + "RTS bad count", pdev->rts_bad); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "RTS good count", fw_stats->rts_good); + "RTS good count", pdev->rts_good); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "FCS bad count", fw_stats->fcs_bad); + "FCS bad count", pdev->fcs_bad); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "No beacon count", fw_stats->no_beacons); + "No beacon count", pdev->no_beacons); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", - "MIB int count", fw_stats->mib_int_count); + "MIB int count", pdev->mib_int_count); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -443,51 +449,51 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies queued", fw_stats->comp_queued); + "HTT cookies queued", pdev->comp_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HTT cookies disp.", fw_stats->comp_delivered); + "HTT cookies disp.", pdev->comp_delivered); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDU queued", fw_stats->msdu_enqued); + "MSDU queued", pdev->msdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU queued", fw_stats->mpdu_enqued); + "MPDU queued", pdev->mpdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs dropped", fw_stats->wmm_drop); + "MSDUs dropped", pdev->wmm_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local enqued", fw_stats->local_enqued); + "Local enqued", pdev->local_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Local freed", fw_stats->local_freed); + "Local freed", pdev->local_freed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW queued", fw_stats->hw_queued); + "HW queued", pdev->hw_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs reaped", fw_stats->hw_reaped); + "PPDUs reaped", pdev->hw_reaped); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Num underruns", fw_stats->underrun); + "Num underruns", pdev->underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PPDUs cleaned", fw_stats->tx_abort); + "PPDUs cleaned", pdev->tx_abort); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs requed", fw_stats->mpdus_requed); + "MPDUs requed", pdev->mpdus_requed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Excessive retries", fw_stats->tx_ko); + "Excessive retries", pdev->tx_ko); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "HW rate", fw_stats->data_rc); + "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", fw_stats->self_triggers); + "Sched self tiggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Dropped due to SW retries", - fw_stats->sw_retry_failure); + pdev->sw_retry_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Illegal rate phy errors", - fw_stats->illgl_rate_phy_err); + pdev->illgl_rate_phy_err); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Pdev continous xretry", fw_stats->pdev_cont_xretry); + "Pdev continous xretry", pdev->pdev_cont_xretry); len += scnprintf(buf + len, buf_len 
- len, "%30s %10d\n", - "TX timeout", fw_stats->pdev_tx_timeout); + "TX timeout", pdev->pdev_tx_timeout); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PDEV resets", fw_stats->pdev_resets); + "PDEV resets", pdev->pdev_resets); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY underrun", fw_stats->phy_underrun); + "PHY underrun", pdev->phy_underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU is more than txop limit", fw_stats->txop_ovf); + "MPDU is more than txop limit", pdev->txop_ovf); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", @@ -497,70 +503,161 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Mid PPDU route change", - fw_stats->mid_ppdu_route_change); + pdev->mid_ppdu_route_change); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Tot. number of statuses", fw_stats->status_rcvd); + "Tot. number of statuses", pdev->status_rcvd); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 0", fw_stats->r0_frags); + "Extra frags on rings 0", pdev->r0_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 1", fw_stats->r1_frags); + "Extra frags on rings 1", pdev->r1_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 2", fw_stats->r2_frags); + "Extra frags on rings 2", pdev->r2_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Extra frags on rings 3", fw_stats->r3_frags); + "Extra frags on rings 3", pdev->r3_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to HTT", fw_stats->htt_msdus); + "MSDUs delivered to HTT", pdev->htt_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to HTT", fw_stats->htt_mpdus); + "MPDUs delivered to HTT", pdev->htt_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MSDUs delivered to stack", fw_stats->loc_msdus); + "MSDUs delivered to stack", pdev->loc_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDUs delivered to stack", fw_stats->loc_mpdus); + "MPDUs delivered to stack", pdev->loc_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Oversized AMSUs", fw_stats->oversize_amsdu); + "Oversized AMSUs", pdev->oversize_amsdu); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors", fw_stats->phy_errs); + "PHY errors", pdev->phy_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "PHY errors drops", fw_stats->phy_err_drop); + "PHY errors drops", pdev->phy_err_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs); + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); len += scnprintf(buf + len, buf_len - len, "\n"); - len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n", - "ath10k PEER stats", fw_stats->peers); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath10k PEER stats", num_peers); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); - for (i = 0; i < fw_stats->peers; i++) { + list_for_each_entry(peer, &fw_stats->peers, list) { len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", - "Peer MAC address", - fw_stats->peer_stat[i].peer_macaddr); + "Peer MAC address", peer->peer_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RSSI", 
fw_stats->peer_stat[i].peer_rssi); + "Peer RSSI", peer->peer_rssi); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer TX rate", - fw_stats->peer_stat[i].peer_tx_rate); + "Peer TX rate", peer->peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", - "Peer RX rate", - fw_stats->peer_stat[i].peer_rx_rate); + "Peer RX rate", peer->peer_rx_rate); len += scnprintf(buf + len, buf_len - len, "\n"); } + +unlock: spin_unlock_bh(&ar->data_lock); - if (len > buf_len) - len = buf_len; + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} - ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); +static int ath10k_fw_stats_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf = NULL; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE); + if (!buf) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = ath10k_debug_fw_stats_request(ar); + if (ret) { + ath10k_warn(ar, "failed to request fw stats: %d\n", ret); + goto err_free; + } + + ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf); + file->private_data = buf; -exit: mutex_unlock(&ar->conf_mutex); - kfree(buf); - return ret_cnt; + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath10k_fw_stats_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + unsigned int len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_fw_stats = { - .read = ath10k_read_fw_stats, + .open = ath10k_fw_stats_open, + .release = ath10k_fw_stats_release, + .read = ath10k_fw_stats_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + int ret, len, buf_len; + char *buf; + + buf_len = 500; + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_bh(&ar->data_lock); + + len = 0; + len += scnprintf(buf + len, buf_len - len, + "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_warm_reset_counter\t\t%d\n", + ar->stats.fw_warm_reset_counter); + len += scnprintf(buf + len, buf_len - len, + "fw_cold_reset_counter\t\t%d\n", + ar->stats.fw_cold_reset_counter); + + spin_unlock_bh(&ar->data_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + + return ret; +} + +static const struct file_operations fops_fw_reset_stats = { .open = simple_open, + .read = ath10k_debug_fw_reset_stats_read, .owner = THIS_MODULE, .llseek = default_llseek, }; @@ -593,7 +690,8 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file, "To simulate firmware crash write one of the keywords to this file:\n" "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n" "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n" - "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"; + "`assert` - this will send special illegal parameter to firmware to 
cause assert failure and crash.\n" + "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } @@ -646,6 +744,10 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, } else if (!strcmp(buf, "assert")) { ath10k_info(ar, "simulating firmware assert crash\n"); ret = ath10k_debug_fw_assert(ar); + } else if (!strcmp(buf, "hw-restart")) { + ath10k_info(ar, "user requested hw restart\n"); + queue_work(ar->workqueue, &ar->restart_work); + ret = 0; } else { ret = -EINVAL; goto exit; @@ -759,8 +861,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, sizeof(dump_data->fw_ver)); - dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE); - strlcpy(dump_data->kernel_ver, VERMAGIC_STRING, + dump_data->kernel_ver_code = 0; + strlcpy(dump_data->kernel_ver, init_utsname()->release, sizeof(dump_data->kernel_ver)); dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); @@ -822,6 +924,236 @@ static const struct file_operations fops_fw_crash_dump = { .llseek = default_llseek, }; +static ssize_t ath10k_reg_addr_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 buf[32]; + unsigned int len = 0; + u32 reg_addr; + + mutex_lock(&ar->conf_mutex); + reg_addr = ar->debug.reg_addr; + mutex_unlock(&ar->conf_mutex); + + len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath10k_reg_addr_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 reg_addr; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, ®_addr); + if (ret) + return ret; + + if (!IS_ALIGNED(reg_addr, 4)) + return -EFAULT; + + mutex_lock(&ar->conf_mutex); + ar->debug.reg_addr = reg_addr; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_reg_addr = { + .read = ath10k_reg_addr_read, + .write = ath10k_reg_addr_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_reg_value_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 buf[48]; + unsigned int len; + u32 reg_addr, reg_val; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + reg_addr = ar->debug.reg_addr; + + reg_val = ath10k_hif_read32(ar, reg_addr); + len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_reg_value_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 reg_addr, reg_val; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + reg_addr = ar->debug.reg_addr; + + ret = kstrtou32_from_user(user_buf, count, 0, ®_val); + if (ret) + goto exit; + + ath10k_hif_write32(ar, reg_addr, reg_val); + + ret = count; + +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static 
const struct file_operations fops_reg_value = { + .read = ath10k_reg_value_read, + .write = ath10k_reg_value_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath10k_mem_value_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 *buf; + int ret; + + if (*ppos < 0) + return -EINVAL; + + if (!count) + return 0; + + mutex_lock(&ar->conf_mutex); + + buf = vmalloc(count); + if (!buf) { + ret = -ENOMEM; + goto exit; + } + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + ret = ath10k_hif_diag_read(ar, *ppos, buf, count); + if (ret) { + ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n", + (u32)(*ppos), ret); + goto exit; + } + + ret = copy_to_user(user_buf, buf, count); + if (ret) { + ret = -EFAULT; + goto exit; + } + + count -= ret; + *ppos += count; + ret = count; + +exit: + vfree(buf); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_mem_value_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u8 *buf; + int ret; + + if (*ppos < 0) + return -EINVAL; + + if (!count) + return 0; + + mutex_lock(&ar->conf_mutex); + + buf = vmalloc(count); + if (!buf) { + ret = -ENOMEM; + goto exit; + } + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto exit; + } + + ret = copy_from_user(buf, user_buf, count); + if (ret) { + ret = -EFAULT; + goto exit; + } + + ret = ath10k_hif_diag_write(ar, *ppos, buf, count); + if (ret) { + ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n", + (u32)(*ppos), ret); + goto exit; + } + + *ppos += count; + ret = count; + +exit: + vfree(buf); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static const struct file_operations fops_mem_value = { + .read = ath10k_mem_value_read, + .write = ath10k_mem_value_write, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static int ath10k_debug_htt_stats_req(struct ath10k *ar) { u64 cookie; @@ -1029,6 +1361,166 @@ exit: return ret; } +/* TODO: Would be nice to always support ethtool stats, would need to + * move the stats storage out of ath10k_debug, or always have ath10k_debug + * struct available.. 
+ */ + +/* This generally cooresponds to the debugfs fw_stats file */ +static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx_pkts_nic", + "tx_bytes_nic", + "rx_pkts_nic", + "rx_bytes_nic", + "d_noise_floor", + "d_cycle_count", + "d_phy_error", + "d_rts_bad", + "d_rts_good", + "d_tx_power", /* in .5 dbM I think */ + "d_rx_crc_err", /* fcs_bad */ + "d_no_beacon", + "d_tx_mpdus_queued", + "d_tx_msdu_queued", + "d_tx_msdu_dropped", + "d_local_enqued", + "d_local_freed", + "d_tx_ppdu_hw_queued", + "d_tx_ppdu_reaped", + "d_tx_fifo_underrun", + "d_tx_ppdu_abort", + "d_tx_mpdu_requed", + "d_tx_excessive_retries", + "d_tx_hw_rate", + "d_tx_dropped_sw_retries", + "d_tx_illegal_rate", + "d_tx_continuous_xretries", + "d_tx_timeout", + "d_tx_mpdu_txop_limit", + "d_pdev_resets", + "d_rx_mid_ppdu_route_change", + "d_rx_status", + "d_rx_extra_frags_ring0", + "d_rx_extra_frags_ring1", + "d_rx_extra_frags_ring2", + "d_rx_extra_frags_ring3", + "d_rx_msdu_htt", + "d_rx_mpdu_htt", + "d_rx_msdu_stack", + "d_rx_mpdu_stack", + "d_rx_phy_err", + "d_rx_phy_err_drops", + "d_rx_mpdu_errors", /* FCS, MIC, ENC */ + "d_fw_crash_count", + "d_fw_warm_reset_count", + "d_fw_cold_reset_count", +}; + +#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats) + +void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *ath10k_gstrings_stats, + sizeof(ath10k_gstrings_stats)); +} + +int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return ATH10K_SSTATS_LEN; + + return 0; +} + +void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct ath10k *ar = hw->priv; + static const struct ath10k_fw_stats_pdev zero_stats = {}; + const struct ath10k_fw_stats_pdev *pdev_stats; + int i = 0, ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ATH10K_STATE_ON) { + ret = ath10k_debug_fw_stats_request(ar); + if (ret) { + /* just print a warning and try to use older results */ + ath10k_warn(ar, + "failed to get fw stats for ethtool: %d\n", + ret); + } + } + + pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs, + struct ath10k_fw_stats_pdev, + list); + if (!pdev_stats) { + /* no results available so just return zeroes */ + pdev_stats = &zero_stats; + } + + spin_lock_bh(&ar->data_lock); + + data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */ + data[i++] = 0; /* tx bytes */ + data[i++] = pdev_stats->htt_mpdus; + data[i++] = 0; /* rx bytes */ + data[i++] = pdev_stats->ch_noise_floor; + data[i++] = pdev_stats->cycle_count; + data[i++] = pdev_stats->phy_err_count; + data[i++] = pdev_stats->rts_bad; + data[i++] = pdev_stats->rts_good; + data[i++] = pdev_stats->chan_tx_power; + data[i++] = pdev_stats->fcs_bad; + data[i++] = pdev_stats->no_beacons; + data[i++] = pdev_stats->mpdu_enqued; + data[i++] = pdev_stats->msdu_enqued; + data[i++] = pdev_stats->wmm_drop; + data[i++] = pdev_stats->local_enqued; + data[i++] = pdev_stats->local_freed; + data[i++] = pdev_stats->hw_queued; + data[i++] = pdev_stats->hw_reaped; + data[i++] = pdev_stats->underrun; + data[i++] = pdev_stats->tx_abort; + data[i++] = pdev_stats->mpdus_requed; + data[i++] = pdev_stats->tx_ko; + data[i++] = pdev_stats->data_rc; + data[i++] = pdev_stats->sw_retry_failure; + data[i++] = pdev_stats->illgl_rate_phy_err; + data[i++] = pdev_stats->pdev_cont_xretry; + data[i++] = pdev_stats->pdev_tx_timeout; + 
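
These get_et_* hooks plug into mac80211's ethtool support (the wiring lives in mac.c, outside this hunk), so the counters named above surface as ordinary "ethtool -S" output on the netdev. For reference, a hedged userspace sketch of the underlying ioctl sequence: query the ETH_SS_STATS set size, fetch the names, then the u64 values. The interface name is an assumption and error handling is trimmed for brevity.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct ethtool_sset_info *sset;
		struct ethtool_gstrings *names;
		struct ethtool_stats *vals;
		unsigned int i, n;

		strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1); /* assumed name */

		/* 1) How many ETH_SS_STATS entries does the driver report? */
		sset = calloc(1, sizeof(*sset) + sizeof(__u32));
		sset->cmd = ETHTOOL_GSSET_INFO;
		sset->sset_mask = 1ULL << ETH_SS_STATS;
		ifr.ifr_data = (void *)sset;
		ioctl(fd, SIOCETHTOOL, &ifr);
		n = sset->data[0];

		/* 2) Fetch the string names and the 64-bit counters. */
		names = calloc(1, sizeof(*names) + n * ETH_GSTRING_LEN);
		names->cmd = ETHTOOL_GSTRINGS;
		names->string_set = ETH_SS_STATS;
		names->len = n;
		ifr.ifr_data = (void *)names;
		ioctl(fd, SIOCETHTOOL, &ifr);

		vals = calloc(1, sizeof(*vals) + n * sizeof(__u64));
		vals->cmd = ETHTOOL_GSTATS;
		vals->n_stats = n;
		ifr.ifr_data = (void *)vals;
		ioctl(fd, SIOCETHTOOL, &ifr);

		for (i = 0; i < n; i++)
			printf("%-32.32s %llu\n",
			       (char *)names->data + i * ETH_GSTRING_LEN,
			       (unsigned long long)vals->data[i]);
		return 0;
	}
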
data[i++] = pdev_stats->txop_ovf; + data[i++] = pdev_stats->pdev_resets; + data[i++] = pdev_stats->mid_ppdu_route_change; + data[i++] = pdev_stats->status_rcvd; + data[i++] = pdev_stats->r0_frags; + data[i++] = pdev_stats->r1_frags; + data[i++] = pdev_stats->r2_frags; + data[i++] = pdev_stats->r3_frags; + data[i++] = pdev_stats->htt_msdus; + data[i++] = pdev_stats->htt_mpdus; + data[i++] = pdev_stats->loc_msdus; + data[i++] = pdev_stats->loc_mpdus; + data[i++] = pdev_stats->phy_errs; + data[i++] = pdev_stats->phy_err_drop; + data[i++] = pdev_stats->mpdu_errs; + data[i++] = ar->stats.fw_crash_counter; + data[i++] = ar->stats.fw_warm_reset_counter; + data[i++] = ar->stats.fw_cold_reset_counter; + + spin_unlock_bh(&ar->data_lock); + + mutex_unlock(&ar->conf_mutex); + + WARN_ON(i != ATH10K_SSTATS_LEN); +} + static const struct file_operations fops_fw_dbglog = { .read = ath10k_read_fw_dbglog, .write = ath10k_write_fw_dbglog, @@ -1037,6 +1529,84 @@ static const struct file_operations fops_fw_dbglog = { .llseek = default_llseek, }; +static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + void *buf; + u32 hi_addr; + __le32 addr; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_UTF) { + ret = -ENETDOWN; + goto err; + } + + buf = vmalloc(QCA988X_CAL_DATA_LEN); + if (!buf) { + ret = -ENOMEM; + goto err; + } + + hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); + + ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); + if (ret) { + ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret); + goto err_vfree; + } + + ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf, + QCA988X_CAL_DATA_LEN); + if (ret) { + ath10k_warn(ar, "failed to read calibration data: %d\n", ret); + goto err_vfree; + } + + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + + return 0; + +err_vfree: + vfree(buf); + +err: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static ssize_t ath10k_debug_cal_data_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + void *buf = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, + buf, QCA988X_CAL_DATA_LEN); +} + +static int ath10k_debug_cal_data_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static const struct file_operations fops_cal_data = { + .open = ath10k_debug_cal_data_open, + .read = ath10k_debug_cal_data_read, + .release = ath10k_debug_cal_data_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + int ath10k_debug_start(struct ath10k *ar) { int ret; @@ -1057,7 +1627,22 @@ int ath10k_debug_start(struct ath10k *ar) ret); } - return 0; + if (ar->debug.pktlog_filter) { + ret = ath10k_wmi_pdev_pktlog_enable(ar, + ar->debug.pktlog_filter); + if (ret) + /* not serious */ + ath10k_warn(ar, + "failed to enable pktlog filter %x: %d\n", + ar->debug.pktlog_filter, ret); + } else { + ret = ath10k_wmi_pdev_pktlog_disable(ar); + if (ret) + /* not serious */ + ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); + } + + return ret; } void ath10k_debug_stop(struct ath10k *ar) @@ -1072,6 +1657,8 @@ void ath10k_debug_stop(struct ath10k *ar) ar->debug.htt_max_amsdu = 0; ar->debug.htt_max_ampdu = 0; + + ath10k_wmi_pdev_pktlog_disable(ar); } static ssize_t ath10k_write_simulate_radar(struct file *file, @@ -1154,12 +1741,78 @@ static const struct file_operations fops_dfs_stats = { .llseek = 
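
The cal_data hook above takes a snapshot of the board/calibration data at open time: it requires the device to be in the ON or UTF state, follows the hi_board_data pointer through the diagnose window, and then serves a fixed QCA988X_CAL_DATA_LEN-byte buffer to reads. A small userspace sketch for dumping it; the debugfs path is again an assumption used only for illustration.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Both paths are illustrative assumptions. */
		const char *src = "/sys/kernel/debug/ieee80211/phy0/ath10k/cal_data";
		const char *dst = "cal_data.bin";
		char buf[4096];
		ssize_t n;
		int in = open(src, O_RDONLY);
		int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (in < 0 || out < 0)
			return 1;

		/* The file is a fixed-size snapshot taken when it was opened. */
		while ((n = read(in, buf, sizeof(buf))) > 0)
			write(out, buf, n);

		close(in);
		close(out);
		return 0;
	}
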
default_llseek, }; +static ssize_t ath10k_write_pktlog_filter(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) { + ar->debug.pktlog_filter = filter; + ret = count; + goto out; + } + + if (filter && (filter != ar->debug.pktlog_filter)) { + ret = ath10k_wmi_pdev_pktlog_enable(ar, filter); + if (ret) { + ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n", + ar->debug.pktlog_filter, ret); + goto out; + } + } else { + ret = ath10k_wmi_pdev_pktlog_disable(ar); + if (ret) { + ath10k_warn(ar, "failed to disable pktlog: %d\n", ret); + goto out; + } + } + + ar->debug.pktlog_filter = filter; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[32]; + struct ath10k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x\n", + ar->debug.pktlog_filter); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_pktlog_filter = { + .read = ath10k_read_pktlog_filter, + .write = ath10k_write_pktlog_filter, + .open = simple_open +}; + int ath10k_debug_create(struct ath10k *ar) { ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); if (!ar->debug.fw_crash_data) return -ENOMEM; + INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); + INIT_LIST_HEAD(&ar->debug.fw_stats.peers); + return 0; } @@ -1167,6 +1820,8 @@ void ath10k_debug_destroy(struct ath10k *ar) { vfree(ar->debug.fw_crash_data); ar->debug.fw_crash_data = NULL; + + ath10k_debug_fw_stats_reset(ar); } int ath10k_debug_register(struct ath10k *ar) @@ -1183,11 +1838,14 @@ int ath10k_debug_register(struct ath10k *ar) INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); - init_completion(&ar->debug.event_stats_compl); + init_completion(&ar->debug.fw_stats_complete); debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_stats); + debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_fw_reset_stats); + debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_wmi_services); @@ -1197,6 +1855,15 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_crash_dump); + debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_reg_addr); + + debugfs_create_file("reg_value", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_reg_value); + + debugfs_create_file("mem_value", S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, &fops_mem_value); + debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_chip_id); @@ -1210,6 +1877,9 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_fw_dbglog); + debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_cal_data); + if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", S_IWUSR, ar->debug.debugfs_phy, ar, @@ -1224,6 +1894,9 @@ int ath10k_debug_register(struct ath10k *ar) &fops_dfs_stats); } + debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR, 
+ ar->debug.debugfs_phy, ar, &fops_pktlog_filter); + return 0; } @@ -1260,11 +1933,26 @@ void ath10k_dbg_dump(struct ath10k *ar, const char *msg, const char *prefix, const void *buf, size_t len) { + char linebuf[256]; + unsigned int linebuflen; + const void *ptr; + if (ath10k_debug_mask & mask) { if (msg) ath10k_dbg(ar, mask, "%s\n", msg); - print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); + for (ptr = buf; (ptr - buf) < len; ptr += 16) { + linebuflen = 0; + linebuflen += scnprintf(linebuf + linebuflen, + sizeof(linebuf) - linebuflen, + "%s%08x: ", + (prefix ? prefix : ""), + (unsigned int)(ptr - buf)); + hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, + linebuf + linebuflen, + sizeof(linebuf) - linebuflen, true); + dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf); + } } /* tracing code doesn't like null strings :/ */ diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index b3774f7f492c..1b87a5dbec53 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -35,14 +35,24 @@ enum ath10k_debug_mask { ATH10K_DBG_BMI = 0x00000400, ATH10K_DBG_REGULATORY = 0x00000800, ATH10K_DBG_TESTMODE = 0x00001000, + ATH10K_DBG_WMI_PRINT = 0x00002000, ATH10K_DBG_ANY = 0xffffffff, }; +enum ath10k_pktlog_filter { + ATH10K_PKTLOG_RX = 0x000000001, + ATH10K_PKTLOG_TX = 0x000000002, + ATH10K_PKTLOG_RCFIND = 0x000000004, + ATH10K_PKTLOG_RCUPDATE = 0x000000008, + ATH10K_PKTLOG_DBG_PRINT = 0x000000010, + ATH10K_PKTLOG_ANY = 0x00000001f, +}; + extern unsigned int ath10k_debug_mask; -__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...); -__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...); -__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...); void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS @@ -52,18 +62,22 @@ int ath10k_debug_create(struct ath10k *ar); void ath10k_debug_destroy(struct ath10k *ar); int ath10k_debug_register(struct ath10k *ar); void ath10k_debug_unregister(struct ath10k *ar); -void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size); -void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev); +void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb); struct ath10k_fw_crash_data * ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); - #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) +void ath10k_debug_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); +int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); +void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + #else static inline int ath10k_debug_start(struct ath10k *ar) { @@ -92,14 +106,8 @@ static inline void ath10k_debug_unregister(struct ath10k *ar) { } -static inline void ath10k_debug_read_service_map(struct ath10k *ar, - void *service_map, - size_t map_size) -{ -} - -static inline void ath10k_debug_read_target_stats(struct ath10k *ar, - struct wmi_stats_event *ev) +static inline void ath10k_debug_fw_stats_process(struct 
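
The pktlog_filter file takes a bitmask built from the enum ath10k_pktlog_filter values above; writing 0 disables packet logging, and if the device is down the mask is merely cached and applied later by ath10k_debug_start(). A minimal sketch, with the debugfs path assumed as before:

	#include <stdio.h>

	/* Mirrors enum ath10k_pktlog_filter above. */
	#define ATH10K_PKTLOG_RX	0x1
	#define ATH10K_PKTLOG_TX	0x2

	int main(void)
	{
		const char *path =
			"/sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter";
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;

		/* Log both rx and tx; write 0 instead to turn pktlog off. */
		fprintf(f, "0x%x\n", ATH10K_PKTLOG_RX | ATH10K_PKTLOG_TX);
		return fclose(f) ? 1 : 0;
	}

The logged records themselves are delivered through the HTT_T2H_MSG_TYPE_PKTLOG event and emitted via the ath10k_htt_pktlog tracepoint added later in this series.
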
ath10k *ar, + struct sk_buff *skb) { } @@ -116,6 +124,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) +#define ath10k_debug_get_et_strings NULL +#define ath10k_debug_get_et_sset_count NULL +#define ath10k_debug_get_et_stats NULL + #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 62323fea27e1..0c92e0251e84 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include "core.h" +#include "debug.h" struct ath10k_hif_sg_item { u16 transfer_id; @@ -31,11 +32,9 @@ struct ath10k_hif_sg_item { struct ath10k_hif_cb { int (*tx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - unsigned transfer_id); + struct sk_buff *wbuf); int (*rx_completion)(struct ath10k *ar, - struct sk_buff *wbuf, - u8 pipe_id); + struct sk_buff *wbuf); }; struct ath10k_hif_ops { @@ -43,6 +42,12 @@ struct ath10k_hif_ops { int (*tx_sg)(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items); + /* read firmware memory through the diagnose interface */ + int (*diag_read)(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); + + int (*diag_write)(struct ath10k *ar, u32 address, const void *data, + int nbytes); /* * API to handle HIF-specific BMI message exchanges, this API is * synchronous and only allowed to be called from a context that @@ -80,6 +85,10 @@ struct ath10k_hif_ops { u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id); + u32 (*read32)(struct ath10k *ar, u32 address); + + void (*write32)(struct ath10k *ar, u32 address, u32 value); + /* Power up the device and enter BMI transfer mode for FW download */ int (*power_up)(struct ath10k *ar); @@ -98,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items); } +static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ar->hif.ops->diag_read(ar, address, buf, buf_len); +} + +static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + if (!ar->hif.ops->diag_write) + return -EOPNOTSUPP; + + return ar->hif.ops->diag_write(ar, address, data, nbytes); +} + static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar, void *request, u32 request_len, void *response, u32 *response_len) @@ -177,4 +201,25 @@ static inline int ath10k_hif_resume(struct ath10k *ar) return ar->hif.ops->resume(ar); } +static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address) +{ + if (!ar->hif.ops->read32) { + ath10k_warn(ar, "hif read32 not supported\n"); + return 0xdeaddead; + } + + return ar->hif.ops->read32(ar, address); +} + +static inline void ath10k_hif_write32(struct ath10k *ar, + u32 address, u32 data) +{ + if (!ar->hif.ops->write32) { + ath10k_warn(ar, "hif write32 not supported\n"); + return; + } + + ar->hif.ops->write32(ar, address, data); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 676bd4ed969b..f1946a6be442 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -160,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, ath10k_htc_prepare_tx_skb(ep, skb); + skb_cb->eid = eid; skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); ret = dma_mapping_error(dev, 
skb_cb->paddr); if (ret) @@ -197,15 +198,18 @@ err_pull: } static int ath10k_htc_tx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - unsigned int eid) + struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; - struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + struct ath10k_skb_cb *skb_cb; + struct ath10k_htc_ep *ep; if (WARN_ON_ONCE(!skb)) return 0; + skb_cb = ATH10K_SKB_CB(skb); + ep = &htc->endpoint[skb_cb->eid]; + ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ @@ -317,8 +321,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, } static int ath10k_htc_rx_completion_handler(struct ath10k *ar, - struct sk_buff *skb, - u8 pipe_id) + struct sk_buff *skb) { int status = 0; struct ath10k_htc *htc = &ar->htc; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 3b44217a6c19..1bd5545af903 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -126,6 +126,7 @@ enum htt_data_tx_ext_tid { * (HL hosts manage queues on the host ) * more_in_batch: only for HL hosts. indicates if more packets are * pending. this allows target to wait and aggregate + * freq: 0 means home channel of given vdev. intended for offchannel */ struct htt_data_tx_desc { u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */ @@ -133,7 +134,8 @@ struct htt_data_tx_desc { __le16 len; __le16 id; __le32 frags_paddr; - __le32 peerid; + __le16 peerid; + __le16 freq; u8 prefetch[0]; /* start of frame, for FW classification engine */ } __packed; @@ -156,6 +158,9 @@ enum htt_rx_ring_flags { HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15 }; +#define HTT_RX_RING_SIZE_MIN 128 +#define HTT_RX_RING_SIZE_MAX 2048 + struct htt_rx_ring_setup_ring { __le32 fw_idx_shadow_reg_paddr; __le32 rx_ring_base_paddr; @@ -725,7 +730,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test) */ struct htt_pktlog_msg { u8 pad[3]; - __le32 payload[1 /* or more */]; + u8 payload[0]; } __packed; struct htt_dbg_stats_rx_reorder_stats { diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 60d40a04508b..9c782a42665e 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -25,19 +25,8 @@ #include <linux/log2.h> -/* slightly larger than one large A-MPDU */ -#define HTT_RX_RING_SIZE_MIN 128 - -/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */ -#define HTT_RX_RING_SIZE_MAX 2048 - -#define HTT_RX_AVG_FRM_BYTES 1000 - -/* ms, very conservative */ -#define HTT_RX_HOST_LATENCY_MAX_MS 20 - -/* ms, conservative */ -#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 +#define HTT_RX_RING_SIZE 1024 +#define HTT_RX_RING_FILL_LEVEL 1000 /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 @@ -45,68 +34,6 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static void ath10k_htt_txrx_compl_task(unsigned long ptr); -static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) -{ - int size; - - /* - * It is expected that the host CPU will typically be able to - * service the rx indication from one A-MPDU before the rx - * indication from the subsequent A-MPDU happens, roughly 1-2 ms - * later. However, the rx ring should be sized very conservatively, - * to accomodate the worst reasonable delay before the host CPU - * services a rx indication interrupt. - * - * The rx ring need not be kept full of empty buffers. 
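
For scale, a back-of-envelope check of the new fixed sizing: at 1 Gbps of 1500-byte MSDUs the target delivers roughly 1e9 / (8 * 1500) ≈ 83,000 frames per second, so the old 2048-entry ceiling corresponds to about 25 ms of absorbed host latency while the new fixed 1024-entry ring, filled to 1000, covers roughly 12 ms — still comfortably above the ~10 ms "worst likely" latency the removed heuristic assumed (ignoring aggregation, so treat these as rough figures only).
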
In theory, - * the htt host SW can dynamically track the low-water mark in the - * rx ring, and dynamically adjust the level to which the rx ring - * is filled with empty buffers, to dynamically meet the desired - * low-water mark. - * - * In contrast, it's difficult to resize the rx ring itself, once - * it's in use. Thus, the ring itself should be sized very - * conservatively, while the degree to which the ring is filled - * with empty buffers should be sized moderately conservatively. - */ - - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps + - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS; - - if (size < HTT_RX_RING_SIZE_MIN) - size = HTT_RX_RING_SIZE_MIN; - - if (size > HTT_RX_RING_SIZE_MAX) - size = HTT_RX_RING_SIZE_MAX; - - size = roundup_pow_of_two(size); - - return size; -} - -static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt) -{ - int size; - - /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ - size = - htt->max_throughput_mbps * - 1000 / - (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS; - - /* - * Make sure the fill level is at least 1 less than the ring size. - * Leaving 1 element empty allows the SW to easily distinguish - * between a full ring vs. an empty ring. - */ - if (size >= htt->rx_ring.size) - size = htt->rx_ring.size - 1; - - return size; -} - static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) { struct sk_buff *skb; @@ -291,50 +218,38 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) htt->rx_ring.sw_rd_idx.msdu_payld = idx; htt->rx_ring.fill_cnt--; - return msdu; -} + dma_unmap_single(htt->ar->dev, + ATH10K_SKB_CB(msdu)->paddr, + msdu->len + skb_tailroom(msdu), + DMA_FROM_DEVICE); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", + msdu->data, msdu->len + skb_tailroom(msdu)); -static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb) -{ - struct sk_buff *next; - - while (skb) { - next = skb->next; - dev_kfree_skb_any(skb); - skb = next; - } + return msdu; } /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, u8 **fw_desc, int *fw_desc_len, - struct sk_buff **head_msdu, - struct sk_buff **tail_msdu, - u32 *attention) + struct sk_buff_head *amsdu) { struct ath10k *ar = htt->ar; int msdu_len, msdu_chaining = 0; - struct sk_buff *msdu, *next; + struct sk_buff *msdu; struct htt_rx_desc *rx_desc; lockdep_assert_held(&htt->rx_ring.lock); - if (htt->rx_confused) { - ath10k_warn(ar, "htt is confused. refusing rx\n"); - return -1; - } - - msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); - while (msdu) { + for (;;) { int last_msdu, msdu_len_invalid, msdu_chained; - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(msdu)->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", - msdu->data, msdu->len + skb_tailroom(msdu)); + __skb_queue_tail(amsdu, msdu); rx_desc = (struct htt_rx_desc *)msdu->data; @@ -353,19 +268,10 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, */ if (!(__le32_to_cpu(rx_desc->attention.flags) & RX_ATTENTION_FLAGS_MSDU_DONE)) { - ath10k_htt_rx_free_msdu_chain(*head_msdu); - *head_msdu = NULL; - msdu = NULL; - ath10k_err(ar, "htt rx stopped. 
cannot recover\n"); - htt->rx_confused = true; - break; + __skb_queue_purge(amsdu); + return -EIO; } - *attention |= __le32_to_cpu(rx_desc->attention.flags) & - (RX_ATTENTION_FLAGS_TKIP_MIC_ERR | - RX_ATTENTION_FLAGS_DECRYPT_ERR | - RX_ATTENTION_FLAGS_FCS_ERR | - RX_ATTENTION_FLAGS_MGMT_TYPE); /* * Copy the FW rx descriptor for this MSDU from the rx * indication message into the MSDU's netbuf. HL uses the @@ -422,43 +328,32 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); msdu_len -= msdu->len; - /* FIXME: Do chained buffers include htt_rx_desc or not? */ + /* Note: Chained buffers do not contain rx descriptor */ while (msdu_chained--) { - struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); - - dma_unmap_single(htt->ar->dev, - ATH10K_SKB_CB(next)->paddr, - next->len + skb_tailroom(next), - DMA_FROM_DEVICE); - - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, - "htt rx chained: ", next->data, - next->len + skb_tailroom(next)); - - skb_trim(next, 0); - skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE)); - msdu_len -= next->len; + msdu = ath10k_htt_rx_netbuf_pop(htt); + if (!msdu) { + __skb_queue_purge(amsdu); + return -ENOENT; + } - msdu->next = next; - msdu = next; + __skb_queue_tail(amsdu, msdu); + skb_trim(msdu, 0); + skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); + msdu_len -= msdu->len; msdu_chaining = 1; } last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & RX_MSDU_END_INFO0_LAST_MSDU; - if (last_msdu) { - msdu->next = NULL; - break; - } + trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, + sizeof(*rx_desc) - sizeof(u32)); - next = ath10k_htt_rx_netbuf_pop(htt); - msdu->next = next; - msdu = next; + if (last_msdu) + break; } - *tail_msdu = msdu; - if (*head_msdu == NULL) + if (skb_queue_empty(amsdu)) msdu_chaining = -1; /* @@ -492,25 +387,20 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; - htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); + htt->rx_confused = false; + + /* XXX: The fill level could be changed during runtime in response to + * the host processing latency. Is this really worth it? + */ + htt->rx_ring.size = HTT_RX_RING_SIZE; + htt->rx_ring.size_mask = htt->rx_ring.size - 1; + htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL; + if (!is_power_of_2(htt->rx_ring.size)) { ath10k_warn(ar, "htt rx ring size is not power of 2\n"); return -EINVAL; } - htt->rx_ring.size_mask = htt->rx_ring.size - 1; - - /* - * Set the initial value for the level to which the rx ring - * should be filled, based on the max throughput and the - * worst likely latency for the host to fill the rx ring - * with new buffers. In theory, this fill level can be - * dynamically adjusted from the initial value set here, to - * reflect the actual host latency rather than a - * conservative assumption about the host latency. 
- */ - htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt); - htt->rx_ring.netbufs_ring = kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *), GFP_KERNEL); @@ -581,73 +471,50 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type) { switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - return 4; + return IEEE80211_WEP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: - case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */ + return IEEE80211_TKIP_IV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; - case HTT_RX_MPDU_ENCRYPT_NONE: - return 0; + return IEEE80211_CCMP_HDR_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn(ar, "unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } +#define MICHAEL_MIC_LEN 8 + static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: + return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - case HTT_RX_MPDU_ENCRYPT_WEP128: - case HTT_RX_MPDU_ENCRYPT_WAPI: - return 0; + return IEEE80211_WEP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - return 4; + return IEEE80211_TKIP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: - return 8; + return IEEE80211_CCMP_MIC_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; } - ath10k_warn(ar, "unknown encryption type %d\n", type); + ath10k_warn(ar, "unsupported encryption type %d\n", type); return 0; } -/* Applies for first msdu in chain, before altering it. */ -static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb) -{ - struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - - if (fmt == RX_MSDU_DECAP_RAW) - return (void *)skb->data; - - return (void *)skb->data - RX_HTT_HDR_STATUS_LEN; -} - -/* This function only applies for first msdu in an msdu chain */ -static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) -{ - u8 *qc; - - if (ieee80211_is_data_qos(hdr->frame_control)) { - qc = ieee80211_get_qos_ctl(hdr); - if (qc[0] & 0x80) - return true; - } - return false; -} - struct rfc1042_hdr { u8 llc_dsap; u8 llc_ssap; @@ -682,23 +549,34 @@ static const u8 rx_legacy_rate_idx[] = { }; static void ath10k_htt_rx_h_rates(struct ath10k *ar, - enum ieee80211_band band, - u8 info0, u32 info1, u32 info2, - struct ieee80211_rx_status *status) + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) { + enum ieee80211_band band; u8 cck, rate, rate_idx, bw, sgi, mcs, nss; u8 preamble = 0; + u32 info1, info2, info3; - /* Check if valid fields */ - if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID)) + /* Band value can't be set as undefined but freq can be 0 - use that to + * determine whether band is provided. + * + * FIXME: Perhaps this can go away if CCK rate reporting is a little + * reworked? 
+ */ + if (!status->freq) return; - preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE); + band = status->band; + info1 = __le32_to_cpu(rxd->ppdu_start.info1); + info2 = __le32_to_cpu(rxd->ppdu_start.info2); + info3 = __le32_to_cpu(rxd->ppdu_start.info3); + + preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); switch (preamble) { case HTT_RX_LEGACY: - cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK; - rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE); + cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; + rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); rate_idx = 0; if (rate < 0x08 || rate > 0x0F) @@ -725,11 +603,11 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; case HTT_RX_HT: case HTT_RX_HT_WITH_TXBF: - /* HT-SIG - Table 20-11 in info1 and info2 */ - mcs = info1 & 0x1F; + /* HT-SIG - Table 20-11 in info2 and info3 */ + mcs = info2 & 0x1F; nss = mcs >> 3; - bw = (info1 >> 7) & 1; - sgi = (info2 >> 7) & 1; + bw = (info2 >> 7) & 1; + sgi = (info3 >> 7) & 1; status->rate_idx = mcs; status->flag |= RX_FLAG_HT; @@ -740,12 +618,12 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, break; case HTT_RX_VHT: case HTT_RX_VHT_WITH_TXBF: - /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2 + /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 TODO check this */ - mcs = (info2 >> 4) & 0x0F; - nss = ((info1 >> 10) & 0x07) + 1; - bw = info1 & 3; - sgi = info2 & 1; + mcs = (info3 >> 4) & 0x0F; + nss = ((info2 >> 10) & 0x07) + 1; + bw = info2 & 3; + sgi = info3 & 1; status->rate_idx = mcs; status->vht_nss = nss; @@ -773,41 +651,6 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar, } } -static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb, - enum htt_rx_mpdu_encrypt_type enctype, - enum rx_msdu_decap_format fmt, - bool dot11frag) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - - rx_status->flag &= ~(RX_FLAG_DECRYPTED | - RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED); - - if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) - return; - - /* - * There's no explicit rx descriptor flag to indicate whether a given - * frame has been decrypted or not. We're forced to use the decap - * format as an implicit indication. However fragmentation rx is always - * raw and it probably never reports undecrypted raws. - * - * This makes sure sniffed frames are reported as-is without stripping - * the protected flag. - */ - if (fmt == RX_MSDU_DECAP_RAW && !dot11frag) - return; - - rx_status->flag |= RX_FLAG_DECRYPTED | - RX_FLAG_IV_STRIPPED | - RX_FLAG_MMIC_STRIPPED; - hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) & - ~IEEE80211_FCTL_PROTECTED); -} - static bool ath10k_htt_rx_h_channel(struct ath10k *ar, struct ieee80211_rx_status *status) { @@ -828,6 +671,72 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, return true; } +static void ath10k_htt_rx_h_signal(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + /* FIXME: Get real NF */ + status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rxd->ppdu_start.rssi_comb; + status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; +} + +static void ath10k_htt_rx_h_mactime(struct ath10k *ar, + struct ieee80211_rx_status *status, + struct htt_rx_desc *rxd) +{ + /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This + * means all prior MSDUs in a PPDU are reported to mac80211 without the + * TSF. Is it worth holding frames until end of PPDU is known? + * + * FIXME: Can we get/compute 64bit TSF? 
+ */ + status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); + status->flag |= RX_FLAG_MACTIME_END; +} + +static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) +{ + struct sk_buff *first; + struct htt_rx_desc *rxd; + bool is_first_ppdu; + bool is_last_ppdu; + + if (skb_queue_empty(amsdu)) + return; + + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + + is_first_ppdu = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); + is_last_ppdu = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); + + if (is_first_ppdu) { + /* New PPDU starts so clear out the old per-PPDU status. */ + status->freq = 0; + status->rate_idx = 0; + status->vht_nss = 0; + status->vht_flag &= ~RX_VHT_FLAG_80MHZ; + status->flag &= ~(RX_FLAG_HT | + RX_FLAG_VHT | + RX_FLAG_SHORT_GI | + RX_FLAG_40MHZ | + RX_FLAG_MACTIME_END); + status->flag |= RX_FLAG_NO_SIGNAL_VAL; + + ath10k_htt_rx_h_signal(ar, status, rxd); + ath10k_htt_rx_h_channel(ar, status); + ath10k_htt_rx_h_rates(ar, status, rxd); + } + + if (is_last_ppdu) + ath10k_htt_rx_h_mactime(ar, status, rxd); +} + static const char * const tid_to_ac[] = { "BE", "BK", @@ -892,6 +801,8 @@ static void ath10k_process_rx(struct ath10k *ar, !!(status->flag & RX_FLAG_AMSDU_MORE)); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", skb->data, skb->len); + trace_ath10k_rx_hdr(ar, skb->data, skb->len); + trace_ath10k_rx_payload(ar, skb->data, skb->len); ieee80211_rx(ar->hw, skb); } @@ -902,187 +813,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) return round_up(ieee80211_hdrlen(hdr->frame_control), 4); } -static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb_in) +static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted) { - struct ath10k *ar = htt->ar; + struct ieee80211_hdr *hdr; struct htt_rx_desc *rxd; - struct sk_buff *skb = skb_in; - struct sk_buff *first; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; + size_t hdr_len; + size_t crypto_len; + bool is_first; + bool is_last; + + rxd = (void *)msdu->data - sizeof(*rxd); + is_first = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + + /* Delivered decapped frame: + * [802.11 header] + * [crypto param] <-- can be trimmed if !fcs_err && + * !decrypt_err && !peer_idx_invalid + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + * [payload] + * [FCS] <-- at end, needs to be trimmed + */ + + /* This probably shouldn't happen but warn just in case */ + if (unlikely(WARN_ON_ONCE(!is_first))) + return; + + /* This probably shouldn't happen but warn just in case */ + if (unlikely(WARN_ON_ONCE(!(is_first && is_last)))) + return; + + skb_trim(msdu, msdu->len - FCS_LEN); + + /* In most cases this will be true for sniffed frames. It makes sense + * to deliver them as-is without stripping the crypto param. This would + * also make sense for software based decryption (which is not + * implemented in ath10k). + * + * If there's no error then the frame is decrypted. At least that is + * the case for frames that come in via fragmented rx indication. + */ + if (!is_decrypted) + return; + + /* The payload is decrypted so strip crypto params. 
Start from tail + * since hdr is used to compute some stuff. + */ + + hdr = (void *)msdu->data; + + /* Tail */ + skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype)); + + /* MMIC */ + if (!ieee80211_has_morefrags(hdr->frame_control) && + enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) + skb_trim(msdu, msdu->len - 8); + + /* Head */ + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); +} + +static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64]) +{ struct ieee80211_hdr *hdr; - u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos; - unsigned int hdr_len; + size_t hdr_len; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; - rxd = (void *)skb->data - sizeof(*rxd); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); + /* Delivered decapped frame: + * [nwifi 802.11 header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * + * Note: The nwifi header doesn't have QoS Control and is + * (always?) a 3addr frame. + * + * Note2: There's no A-MSDU subframe header. Even if it's part + * of an A-MSDU. + */ + + /* pull decapped header and copy SA & DA */ + hdr = (struct ieee80211_hdr *)msdu->data; + hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); + ether_addr_copy(da, ieee80211_get_DA(hdr)); + ether_addr_copy(sa, ieee80211_get_SA(hdr)); + skb_pull(msdu, hdr_len); - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(hdr_buf, hdr, hdr_len); - hdr = (struct ieee80211_hdr *)hdr_buf; - - first = skb; - while (skb) { - void *decap_hdr; - int len; - - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - decap_hdr = (void *)rxd->rx_hdr_status; - - skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); - - /* First frame in an A-MSDU chain has more decapped data. 
*/ - if (skb == first) { - len = round_up(ieee80211_hdrlen(hdr->frame_control), 4); - len += round_up(ath10k_htt_rx_crypto_param_len(ar, - enctype), 4); - decap_hdr += len; - } + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); - switch (fmt) { - case RX_MSDU_DECAP_RAW: - /* remove trailing FCS */ - skb_trim(skb, skb->len - FCS_LEN); - break; - case RX_MSDU_DECAP_NATIVE_WIFI: - /* pull decapped header and copy SA & DA */ - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - ether_addr_copy(da, ieee80211_get_DA(hdr)); - ether_addr_copy(sa, ieee80211_get_SA(hdr)); - skb_pull(skb, hdr_len); - - /* push original 802.11 header */ - hdr = (struct ieee80211_hdr *)hdr_buf; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - - /* original A-MSDU header has the bit set but we're - * not including A-MSDU subframe header */ - hdr = (struct ieee80211_hdr *)skb->data; - qos = ieee80211_get_qos_ctl(hdr); - qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - - /* original 802.11 header has a different DA and in - * case of 4addr it may also have different SA - */ - ether_addr_copy(ieee80211_get_DA(hdr), da); - ether_addr_copy(ieee80211_get_SA(hdr), sa); - break; - case RX_MSDU_DECAP_ETHERNET2_DIX: - /* strip ethernet header and insert decapped 802.11 - * header, amsdu subframe header and rfc1042 header */ + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} - len = 0; - len += sizeof(struct rfc1042_hdr); - len += sizeof(struct amsdu_subframe_hdr); +static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, + struct sk_buff *msdu, + enum htt_rx_mpdu_encrypt_type enctype) +{ + struct ieee80211_hdr *hdr; + struct htt_rx_desc *rxd; + size_t hdr_len, crypto_len; + void *rfc1042; + bool is_first, is_last, is_amsdu; - skb_pull(skb, sizeof(struct ethhdr)); - memcpy(skb_push(skb, len), decap_hdr, len); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - break; - case RX_MSDU_DECAP_8023_SNAP_LLC: - /* insert decapped 802.11 header making a singly - * A-MSDU */ - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - break; - } + rxd = (void *)msdu->data - sizeof(*rxd); + hdr = (void *)rxd->rx_hdr_status; - skb_in = skb; - ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt, - false); - skb = skb->next; - skb_in->next = NULL; + is_first = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); + is_last = !!(rxd->msdu_end.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); + is_amsdu = !(is_first && is_last); - if (skb) - rx_status->flag |= RX_FLAG_AMSDU_MORE; - else - rx_status->flag &= ~RX_FLAG_AMSDU_MORE; + rfc1042 = hdr; + + if (is_first) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); - ath10k_process_rx(htt->ar, rx_status, skb_in); + rfc1042 += round_up(hdr_len, 4) + + round_up(crypto_len, 4); } - /* FIXME: It might be nice to re-assemble the A-MSDU when there's a - * monitor interface active for sniffing purposes. 
*/ + if (is_amsdu) + rfc1042 += sizeof(struct amsdu_subframe_hdr); + + return rfc1042; } -static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, - struct ieee80211_rx_status *rx_status, - struct sk_buff *skb) +static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype) { - struct ath10k *ar = htt->ar; - struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; - enum rx_msdu_decap_format fmt; - enum htt_rx_mpdu_encrypt_type enctype; - int hdr_len; + struct ethhdr *eth; + size_t hdr_len; void *rfc1042; + u8 da[ETH_ALEN]; + u8 sa[ETH_ALEN]; - /* This shouldn't happen. If it does than it may be a FW bug. */ - if (skb->next) { - ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n"); - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - } + /* Delivered decapped frame: + * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc + * [payload] + */ - rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); + if (WARN_ON_ONCE(!rfc1042)) + return; + + /* pull decapped header and copy SA & DA */ + eth = (struct ethhdr *)msdu->data; + ether_addr_copy(da, eth->h_dest); + ether_addr_copy(sa, eth->h_source); + skb_pull(msdu, sizeof(struct ethhdr)); + + /* push rfc1042/llc/snap */ + memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, + sizeof(struct rfc1042_hdr)); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); - skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_DA(hdr), da); + ether_addr_copy(ieee80211_get_SA(hdr), sa); +} - switch (fmt) { +static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + const u8 first_hdr[64]) +{ + struct ieee80211_hdr *hdr; + size_t hdr_len; + + /* Delivered decapped frame: + * [amsdu header] <-- replaced with 802.11 hdr + * [rfc1042/llc] + * [payload] + */ + + skb_pull(msdu, sizeof(struct amsdu_subframe_hdr)); + + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); +} + +static void ath10k_htt_rx_h_undecap(struct ath10k *ar, + struct sk_buff *msdu, + struct ieee80211_rx_status *status, + u8 first_hdr[64], + enum htt_rx_mpdu_encrypt_type enctype, + bool is_decrypted) +{ + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format decap; + struct ieee80211_hdr *hdr; + + /* First msdu's decapped header: + * [802.11 header] <-- padded to 4 bytes long + * [crypto param] <-- padded to 4 bytes long + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + * + * Other (2nd, 3rd, ..) 
msdu's decapped header: + * [amsdu header] <-- only if A-MSDU + * [rfc1042/llc] + */ + + rxd = (void *)msdu->data - sizeof(*rxd); + hdr = (void *)rxd->rx_hdr_status; + decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); + + switch (decap) { case RX_MSDU_DECAP_RAW: - /* remove trailing FCS */ - skb_trim(skb, skb->len - FCS_LEN); + ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, + is_decrypted); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* Pull decapped header */ - hdr = (struct ieee80211_hdr *)skb->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - skb_pull(skb, hdr_len); - - /* Push original header */ - hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); break; case RX_MSDU_DECAP_ETHERNET2_DIX: - /* strip ethernet header and insert decapped 802.11 header and - * rfc1042 header */ - - rfc1042 = hdr; - rfc1042 += roundup(hdr_len, 4); - rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar, - enctype), 4); - - skb_pull(skb, sizeof(struct ethhdr)); - memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), - rfc1042, sizeof(struct rfc1042_hdr)); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); break; case RX_MSDU_DECAP_8023_SNAP_LLC: - /* remove A-MSDU subframe header and insert - * decapped 802.11 header. rfc1042 header is already there */ - - skb_pull(skb, sizeof(struct amsdu_subframe_hdr)); - memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); break; } - - ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false); - - ath10k_process_rx(htt->ar, rx_status, skb); } static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) @@ -1116,10 +1103,128 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) return CHECKSUM_UNNECESSARY; } -static int ath10k_unchain_msdu(struct sk_buff *msdu_head) +static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu) +{ + msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu); +} + +static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) { - struct sk_buff *next = msdu_head->next; - struct sk_buff *to_free = next; + struct sk_buff *first; + struct sk_buff *last; + struct sk_buff *msdu; + struct htt_rx_desc *rxd; + struct ieee80211_hdr *hdr; + enum htt_rx_mpdu_encrypt_type enctype; + u8 first_hdr[64]; + u8 *qos; + size_t hdr_len; + bool has_fcs_err; + bool has_crypto_err; + bool has_tkip_err; + bool has_peer_idx_invalid; + bool is_decrypted; + u32 attention; + + if (skb_queue_empty(amsdu)) + return; + + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + + enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), + RX_MPDU_START_INFO0_ENCRYPT_TYPE); + + /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 + * decapped header. It'll be used for undecapping of each MSDU. + */ + hdr = (void *)rxd->rx_hdr_status; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(first_hdr, hdr, hdr_len); + + /* Each A-MSDU subframe will use the original header as the base and be + * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. + */ + hdr = (void *)first_hdr; + qos = ieee80211_get_qos_ctl(hdr); + qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + /* Some attention flags are valid only in the last MSDU. 
*/ + last = skb_peek_tail(amsdu); + rxd = (void *)last->data - sizeof(*rxd); + attention = __le32_to_cpu(rxd->attention.flags); + + has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); + has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); + has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); + has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); + + /* Note: If hardware captures an encrypted frame that it can't decrypt, + * e.g. due to fcs error, missing peer or invalid key data it will + * report the frame as raw. + */ + is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && + !has_fcs_err && + !has_crypto_err && + !has_peer_idx_invalid); + + /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ + status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | + RX_FLAG_MMIC_ERROR | + RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED); + + if (has_fcs_err) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (has_tkip_err) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (is_decrypted) + status->flag |= RX_FLAG_DECRYPTED | + RX_FLAG_IV_STRIPPED | + RX_FLAG_MMIC_STRIPPED; + + skb_queue_walk(amsdu, msdu) { + ath10k_htt_rx_h_csum_offload(msdu); + ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, + is_decrypted); + + /* Undecapping involves copying the original 802.11 header back + * to sk_buff. If frame is protected and hardware has decrypted + * it then remove the protected bit. + */ + if (!is_decrypted) + continue; + + hdr = (void *)msdu->data; + hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } +} + +static void ath10k_htt_rx_h_deliver(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *status) +{ + struct sk_buff *msdu; + + while ((msdu = __skb_dequeue(amsdu))) { + /* Setup per-MSDU flags */ + if (skb_queue_empty(amsdu)) + status->flag &= ~RX_FLAG_AMSDU_MORE; + else + status->flag |= RX_FLAG_AMSDU_MORE; + + ath10k_process_rx(ar, status, msdu); + } +} + +static int ath10k_unchain_msdu(struct sk_buff_head *amsdu) +{ + struct sk_buff *skb, *first; int space; int total_len = 0; @@ -1130,113 +1235,142 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head) * skb? */ - msdu_head->next = NULL; + first = __skb_dequeue(amsdu); /* Allocate total length all at once. */ - while (next) { - total_len += next->len; - next = next->next; - } + skb_queue_walk(amsdu, skb) + total_len += skb->len; - space = total_len - skb_tailroom(msdu_head); + space = total_len - skb_tailroom(first); if ((space > 0) && - (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) { + (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { /* TODO: bump some rx-oom error stat */ /* put it back together so we can free the * whole list at once. */ - msdu_head->next = to_free; + __skb_queue_head(amsdu, first); return -1; } /* Walk list again, copying contents into * msdu_head */ - next = to_free; - while (next) { - skb_copy_from_linear_data(next, skb_put(msdu_head, next->len), - next->len); - next = next->next; + while ((skb = __skb_dequeue(amsdu))) { + skb_copy_from_linear_data(skb, skb_put(first, skb->len), + skb->len); + dev_kfree_skb_any(skb); } - /* If here, we have consolidated skb. Free the - * fragments and pass the main skb on up the - * stack. 
- */ - ath10k_htt_rx_free_msdu_chain(to_free); + __skb_queue_head(amsdu, first); return 0; } -static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, - struct sk_buff *head, - enum htt_rx_mpdu_status status, - bool channel_set, - u32 attention) +static void ath10k_htt_rx_h_unchain(struct ath10k *ar, + struct sk_buff_head *amsdu, + bool chained) { - struct ath10k *ar = htt->ar; + struct sk_buff *first; + struct htt_rx_desc *rxd; + enum rx_msdu_decap_format decap; - if (head->len == 0) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx dropping due to zero-len\n"); - return false; - } + first = skb_peek(amsdu); + rxd = (void *)first->data - sizeof(*rxd); + decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + RX_MSDU_START_INFO1_DECAP_FORMAT); - if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx dropping due to decrypt-err\n"); - return false; - } + if (!chained) + return; - if (!channel_set) { - ath10k_warn(ar, "no channel configured; ignoring frame!\n"); - return false; + /* FIXME: Current unchaining logic can only handle simple case of raw + * msdu chaining. If decapping is other than raw the chaining may be + * more complex and this isn't handled by the current code. Don't even + * try re-constructing such frames - it'll be pretty much garbage. + */ + if (decap != RX_MSDU_DECAP_RAW || + skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) { + __skb_queue_purge(amsdu); + return; } - /* Skip mgmt frames while we handle this in WMI */ - if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || - attention & RX_ATTENTION_FLAGS_MGMT_TYPE) { - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); + ath10k_unchain_msdu(amsdu); +} + +static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) +{ + struct sk_buff *msdu; + struct htt_rx_desc *rxd; + bool is_mgmt; + bool has_fcs_err; + + msdu = skb_peek(amsdu); + rxd = (void *)msdu->data - sizeof(*rxd); + + /* FIXME: It might be a good idea to do some fuzzy-testing to drop + * invalid/dangerous frames. + */ + + if (!rx_status->freq) { + ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); return false; } - if (status != HTT_RX_IND_MPDU_STATUS_OK && - status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && - status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && - !htt->ar->monitor_started) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx ignoring frame w/ status %d\n", - status); + is_mgmt = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); + has_fcs_err = !!(rxd->attention.flags & + __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR)); + + /* Management frames are handled via WMI events. The pros of such + * approach is that channel is explicitly provided in WMI events + * whereas HTT doesn't provide channel information for Rxed frames. + * + * However some firmware revisions don't report corrupted frames via + * WMI so don't drop them. 
+ */ + if (is_mgmt && !has_fcs_err) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); return false; } - if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { - ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt rx CAC running\n"); + if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); return false; } return true; } +static void ath10k_htt_rx_h_filter(struct ath10k *ar, + struct sk_buff_head *amsdu, + struct ieee80211_rx_status *rx_status) +{ + if (skb_queue_empty(amsdu)) + return; + + if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) + return; + + __skb_queue_purge(amsdu); +} + static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; struct htt_rx_indication_mpdu_range *mpdu_ranges; - struct htt_rx_desc *rxd; - enum htt_rx_mpdu_status status; - struct ieee80211_hdr *hdr; + struct sk_buff_head amsdu; int num_mpdu_ranges; - u32 attention; int fw_desc_len; u8 *fw_desc; - bool channel_set; - int i, j; - int ret; + int i, ret, mpdu_count = 0; lockdep_assert_held(&htt->rx_ring.lock); + if (htt->rx_confused) + return; + fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes); fw_desc = (u8 *)&rx->fw_desc; @@ -1244,92 +1378,33 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); - /* Fill this once, while this is per-ppdu */ - if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) { - memset(rx_status, 0, sizeof(*rx_status)); - rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + - rx->ppdu.combined_rssi; - } - - if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) { - /* TSF available only in 32-bit */ - rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff; - rx_status->flag |= RX_FLAG_MACTIME_END; - } - - channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status); - - if (channel_set) { - ath10k_htt_rx_h_rates(htt->ar, rx_status->band, - rx->ppdu.info0, - __le32_to_cpu(rx->ppdu.info1), - __le32_to_cpu(rx->ppdu.info2), - rx_status); - } - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", rx, sizeof(*rx) + (sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges)); - for (i = 0; i < num_mpdu_ranges; i++) { - status = mpdu_ranges[i].mpdu_range_status; - - for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) { - struct sk_buff *msdu_head, *msdu_tail; - - attention = 0; - msdu_head = NULL; - msdu_tail = NULL; - ret = ath10k_htt_rx_amsdu_pop(htt, - &fw_desc, - &fw_desc_len, - &msdu_head, - &msdu_tail, - &attention); - - if (ret < 0) { - ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n", - ret); - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - rxd = container_of((void *)msdu_head->data, - struct htt_rx_desc, - msdu_payload); - - if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head, - status, - channel_set, - attention)) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - if (ret > 0 && - ath10k_unchain_msdu(msdu_head) < 0) { - ath10k_htt_rx_free_msdu_chain(msdu_head); - continue; - } - - if (attention & RX_ATTENTION_FLAGS_FCS_ERR) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - else - rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC; - - if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR) - rx_status->flag |= RX_FLAG_MMIC_ERROR; - else - rx_status->flag &= ~RX_FLAG_MMIC_ERROR; - - hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); - - if (ath10k_htt_rx_hdr_is_amsdu(hdr)) - 
ath10k_htt_rx_amsdu(htt, rx_status, msdu_head); - else - ath10k_htt_rx_msdu(htt, rx_status, msdu_head); + for (i = 0; i < num_mpdu_ranges; i++) + mpdu_count += mpdu_ranges[i].mpdu_count; + + while (mpdu_count--) { + __skb_queue_head_init(&amsdu); + ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, + &fw_desc_len, &amsdu); + if (ret < 0) { + ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); + __skb_queue_purge(&amsdu); + /* FIXME: It's probably a good idea to reboot the + * device instead of leaving it inoperable. + */ + htt->rx_confused = true; + break; } + + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); } tasklet_schedule(&htt->rx_replenish_task); @@ -1339,108 +1414,44 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, struct htt_rx_fragment_indication *frag) { struct ath10k *ar = htt->ar; - struct sk_buff *msdu_head, *msdu_tail; - enum htt_rx_mpdu_encrypt_type enctype; - struct htt_rx_desc *rxd; - enum rx_msdu_decap_format fmt; struct ieee80211_rx_status *rx_status = &htt->rx_status; - struct ieee80211_hdr *hdr; + struct sk_buff_head amsdu; int ret; - bool tkip_mic_err; - bool decrypt_err; u8 *fw_desc; - int fw_desc_len, hdrlen, paramlen; - int trim; - u32 attention = 0; + int fw_desc_len; fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes); fw_desc = (u8 *)frag->fw_msdu_rx_desc; - msdu_head = NULL; - msdu_tail = NULL; + __skb_queue_head_init(&amsdu); spin_lock_bh(&htt->rx_ring.lock); ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len, - &msdu_head, &msdu_tail, - &attention); + &amsdu); spin_unlock_bh(&htt->rx_ring.lock); + tasklet_schedule(&htt->rx_replenish_task); + ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); if (ret) { ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", ret); - ath10k_htt_rx_free_msdu_chain(msdu_head); + __skb_queue_purge(&amsdu); return; } - /* FIXME: implement signal strength */ - rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - - hdr = (struct ieee80211_hdr *)msdu_head->data; - rxd = (void *)msdu_head->data - sizeof(*rxd); - tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); - decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); - - if (fmt != RX_MSDU_DECAP_RAW) { - ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n"); - dev_kfree_skb_any(msdu_head); - goto end; - } - - enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), - RX_MPDU_START_INFO0_ENCRYPT_TYPE); - ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt, - true); - msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); - - if (tkip_mic_err) - ath10k_warn(ar, "tkip mic error\n"); - - if (decrypt_err) { - ath10k_warn(ar, "decryption err in fragmented rx\n"); - dev_kfree_skb_any(msdu_head); - goto end; - } - - if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) { - hdrlen = ieee80211_hdrlen(hdr->frame_control); - paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype); - - /* It is more efficient to move the header than the payload */ - memmove((void *)msdu_head->data + paramlen, - (void *)msdu_head->data, - hdrlen); - skb_pull(msdu_head, paramlen); - hdr = (struct ieee80211_hdr *)msdu_head->data; - } - - /* remove trailing FCS */ - trim = 4; - - /* remove crypto trailer */ - trim += ath10k_htt_rx_crypto_tail_len(ar, enctype); - - /* 
last fragment of TKIP frags has MIC */ - if (!ieee80211_has_morefrags(hdr->frame_control) && - enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) - trim += 8; - - if (trim > msdu_head->len) { - ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n"); - dev_kfree_skb_any(msdu_head); - goto end; + if (skb_queue_len(&amsdu) != 1) { + ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n"); + __skb_queue_purge(&amsdu); + return; } - skb_trim(msdu_head, msdu_head->len - trim); - - ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", - msdu_head->data, msdu_head->len); - ath10k_process_rx(htt->ar, rx_status, msdu_head); + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); -end: if (fw_desc_len > 0) { ath10k_dbg(ar, ATH10K_DBG_HTT, "expecting more fragmented rx in one indication %d\n", @@ -1674,6 +1685,15 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_RX_DELBA: ath10k_htt_rx_delba(ar, resp); break; + case HTT_T2H_MSG_TYPE_PKTLOG: { + struct ath10k_pktlog_hdr *hdr = + (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; + + trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, + sizeof(*hdr) + + __le16_to_cpu(hdr->size)); + break; + } case HTT_T2H_MSG_TYPE_RX_FLUSH: { /* Ignore this event because mac80211 takes care of Rx * aggregation reordering. @@ -1681,8 +1701,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } default: - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n", - resp->hdr.msg_type); + ath10k_warn(ar, "htt event (%d) not handled\n", + resp->hdr.msg_type); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index bd87a35201d8..4bc51d8a14a3 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -92,7 +92,6 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; spin_lock_init(&htt->tx_lock); - init_waitqueue_head(&htt->empty_tx_wq); if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features)) htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; @@ -555,14 +554,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); - skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); + skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); + skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); + trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, - "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n", + "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, - (u32)skb_cb->paddr, vdev_id, tid); + (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); + trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); + trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; 
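
The new HTT_T2H_MSG_TYPE_PKTLOG case above sizes the traced buffer as sizeof(*hdr) plus the little-endian hdr->size field, using the ath10k_pktlog_hdr layout added in hw.h below. The following is a minimal host-side sketch of walking records with that layout; the struct and function names here are illustrative only and not part of the driver, and a little-endian host is assumed so the __le16/__le32 fields can be read directly.

/* Illustrative only: record layout mirrors struct ath10k_pktlog_hdr from the
 * hw.h hunk below; the per-record length mirrors the trace call above
 * (sizeof(*hdr) + hdr->size). Names are hypothetical, little-endian host
 * assumed.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pktlog_hdr_example {
	uint16_t flags;
	uint16_t missed_cnt;
	uint16_t log_type;
	uint16_t size;          /* payload length in bytes */
	uint32_t timestamp;
	uint8_t payload[];
} __attribute__((packed));

/* Walk back-to-back pktlog records in buf and report each one. */
static void pktlog_walk_example(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct pktlog_hdr_example) <= len) {
		const struct pktlog_hdr_example *hdr =
			(const void *)(buf + off);
		size_t rec_len = sizeof(*hdr) + hdr->size;

		if (off + rec_len > len)
			break;  /* truncated trailing record */

		printf("pktlog type %u: %u payload bytes at offset %zu\n",
		       (unsigned)hdr->log_type, (unsigned)hdr->size, off);
		off += rec_len;
	}
}

int main(void)
{
	/* One fabricated 4-byte record, just to exercise the walker. */
	uint8_t buf[sizeof(struct pktlog_hdr_example) + 4] = { 0 };
	struct pktlog_hdr_example hdr = {
		.log_type = 7,
		.size = 4,
		.timestamp = 0x12345678,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	pktlog_walk_example(buf, sizeof(buf));
	return 0;
}
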
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 3cf5702c1e7e..dfedfd0e0f34 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -20,15 +20,16 @@ #include "targaddrs.h" +#define ATH10K_FW_DIR "ath10k" + /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 /* QCA988X 2.0 definitions */ #define QCA988X_HW_2_0_VERSION 0x4100016c #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 -#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" +#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 @@ -43,6 +44,8 @@ #define REG_DUMP_COUNT_QCA988X 60 +#define QCA988X_CAL_DATA_LEN 2116 + struct ath10k_fw_ie { __le32 id; __le32 len; @@ -78,6 +81,15 @@ enum ath10k_mcast2ucast_mode { ATH10K_MCAST2UCAST_ENABLED = 1, }; +struct ath10k_pktlog_hdr { + __le16 flags; + __le16 missed_cnt; + __le16 log_type; + __le16 size; + __le32 timestamp; + u8 payload[0]; +} __packed; + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -85,11 +97,13 @@ enum ath10k_mcast2ucast_mode { #define TARGET_DMA_BURST_SIZE 0 #define TARGET_MAC_AGGR_DELIM 0 #define TARGET_AST_SKID_LIMIT 16 -#define TARGET_NUM_PEERS 16 +#define TARGET_NUM_STATIONS 16 +#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \ + (TARGET_NUM_VDEVS)) #define TARGET_NUM_OFFLOAD_PEERS 0 #define TARGET_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_NUM_PEER_KEYS 2 -#define TARGET_NUM_TIDS (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS))) +#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2) #define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_TIMEOUT_LO_PRI 100 @@ -120,12 +134,15 @@ enum ath10k_mcast2ucast_mode { #define TARGET_10X_DMA_BURST_SIZE 0 #define TARGET_10X_MAC_AGGR_DELIM 0 #define TARGET_10X_AST_SKID_LIMIT 16 -#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS)) -#define TARGET_10X_NUM_PEERS_MAX 128 +#define TARGET_10X_NUM_STATIONS 128 +#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) #define TARGET_10X_NUM_OFFLOAD_PEERS 0 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_10X_NUM_PEER_KEYS 2 -#define TARGET_10X_NUM_TIDS 256 +#define TARGET_10X_NUM_TIDS_MAX 256 +#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_NUM_PEERS) * 2) #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 @@ -279,6 +296,7 @@ enum ath10k_mcast2ucast_mode { #define SI_RX_DATA1_OFFSET 0x00000014 #define CORE_CTRL_CPU_INTR_MASK 0x00002000 +#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800 #define CORE_CTRL_ADDRESS 0x0000 #define PCIE_INTR_ENABLE_ADDRESS 0x0008 #define PCIE_INTR_CAUSE_ADDRESS 0x000c diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 46709301a51e..c4005670cba2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -136,7 +136,9 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, if (ret) return ret; + spin_lock_bh(&ar->data_lock); peer->keys[i] = arvif->wep_keys[i]; + spin_unlock_bh(&ar->data_lock); } return 0; @@ -173,12 +175,39 
@@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); + spin_lock_bh(&ar->data_lock); peer->keys[i] = NULL; + spin_unlock_bh(&ar->data_lock); } return first_errno; } +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx) +{ + struct ath10k_peer *peer; + int i; + + lockdep_assert_held(&ar->data_lock); + + /* We don't know which vdev this peer belongs to, + * since WMI doesn't give us that information. + * + * FIXME: multi-bss needs to be handled. + */ + peer = ath10k_peer_find(ar, 0, addr); + if (!peer) + return false; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) + return true; + } + + return false; +} + static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, struct ieee80211_key_conf *key) { @@ -326,6 +355,9 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) lockdep_assert_held(&ar->conf_mutex); + if (ar->num_peers >= ar->max_num_peers) + return -ENOBUFS; + ret = ath10k_wmi_peer_create(ar, vdev_id, addr); if (ret) { ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", @@ -339,9 +371,8 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) addr, vdev_id, ret); return ret; } - spin_lock_bh(&ar->data_lock); + ar->num_peers++; - spin_unlock_bh(&ar->data_lock); return 0; } @@ -391,15 +422,11 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) return 0; } -static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) +static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) { struct ath10k *ar = arvif->ar; u32 vdev_param; - if (value != 0xFFFFFFFF) - value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, - ATH10K_RTS_MAX); - vdev_param = ar->wmi.vdev_param->rts_threshold; return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } @@ -432,9 +459,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) if (ret) return ret; - spin_lock_bh(&ar->data_lock); ar->num_peers--; - spin_unlock_bh(&ar->data_lock); return 0; } @@ -471,20 +496,59 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar) list_del(&peer->list); kfree(peer); } - ar->num_peers = 0; spin_unlock_bh(&ar->data_lock); + + ar->num_peers = 0; + ar->num_stations = 0; } /************************/ /* Interface management */ /************************/ +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + if (!arvif->beacon) + return; + + if (!arvif->beacon_buf) + dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, + arvif->beacon->len, DMA_TO_DEVICE); + + dev_kfree_skb_any(arvif->beacon); + + arvif->beacon = NULL; + arvif->beacon_sent = false; +} + +static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + ath10k_mac_vif_beacon_free(arvif); + + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } +} + static inline int ath10k_vdev_setup_sync(struct ath10k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + return -ESHUTDOWN; + ret = wait_for_completion_timeout(&ar->vdev_setup_done, ATH10K_VDEV_SETUP_TIMEOUT_HZ); if (ret == 0) @@ -517,6 +581,8 @@ static int ath10k_monitor_vdev_start(struct 
ath10k *ar, int vdev_id) arg.channel.max_reg_power = channel->max_reg_power * 2; arg.channel.max_antenna_gain = channel->max_antenna_gain * 2; + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", @@ -564,6 +630,8 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); + reinit_completion(&ar->vdev_setup_done); + ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", @@ -590,9 +658,9 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) return -ENOMEM; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - ar->monitor_vdev_id = bit - 1; + ar->monitor_vdev_id = bit; ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, @@ -603,7 +671,7 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) return ret; } - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); @@ -623,7 +691,7 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar) return ret; } - ar->free_vdev_map |= 1 << ar->monitor_vdev_id; + ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); @@ -909,15 +977,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, arvif->is_up = false; spin_lock_bh(&arvif->ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - - arvif->beacon = NULL; - arvif->beacon_sent = false; - } + ath10k_mac_vif_beacon_free(arvif); spin_unlock_bh(&arvif->ar->data_lock); return; @@ -966,14 +1026,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (is_zero_ether_addr(arvif->bssid)) return; - ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, - arvif->bssid); - if (ret) { - ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", - arvif->bssid, arvif->vdev_id, ret); - return; - } - memset(arvif->bssid, 0, ETH_ALEN); return; @@ -1042,51 +1094,45 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) /* Station management */ /**********************/ +static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, + struct ieee80211_vif *vif) +{ + /* Some firmware revisions have unstable STA powersave when listen + * interval is set too high (e.g. 5). The symptoms are firmware doesn't + * generate NullFunc frames properly even if buffered frames have been + * indicated in Beacon TIM. Firmware would seldom wake up to pull + * buffered frames. Often pinging the device from AP would simply fail. + * + * As a workaround set it to 1. 
+ */ + if (vif->type == NL80211_IFTYPE_STATION) + return 1; + + return ar->hw->conf.listen_interval; +} + static void ath10k_peer_assoc_h_basic(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + lockdep_assert_held(&ar->conf_mutex); ether_addr_copy(arg->addr, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_aid = sta->aid; arg->peer_flags |= WMI_PEER_AUTH; - - if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - /* - * Seems FW have problems with Power Save in STA - * mode when we setup this parameter to high (eg. 5). - * Often we see that FW don't send NULL (with clean P flags) - * frame even there is info about buffered frames in beacons. - * Sometimes we have to wait more than 10 seconds before FW - * will wakeup. Often sending one ping from AP to our device - * just fail (more than 50%). - * - * Seems setting this FW parameter to 1 couse FW - * will check every beacon and will wakup immediately - * after detection buffered data. - */ - arg->peer_listen_intval = 1; - else - arg->peer_listen_intval = ar->hw->conf.listen_interval; - + arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); arg->peer_num_spatial_streams = 1; - - /* - * The assoc capabilities are available only in managed mode. - */ - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf) - arg->peer_caps = bss_conf->assoc_capability; + arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct wmi_peer_assoc_complete_arg *arg) { - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_bss *bss; const u8 *rsnie = NULL; @@ -1343,11 +1389,12 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, } static void ath10k_peer_assoc_h_qos(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) @@ -1359,7 +1406,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } break; case WMI_VDEV_TYPE_STA: - if (bss_conf->qos) + if (vif->bss_conf.qos) arg->peer_flags |= WMI_PEER_QOS; break; default: @@ -1368,7 +1415,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, } static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { @@ -1419,22 +1466,21 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, } static int ath10k_peer_assoc_prepare(struct ath10k *ar, - struct ath10k_vif *arvif, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf, struct wmi_peer_assoc_complete_arg *arg) { lockdep_assert_held(&ar->conf_mutex); memset(arg, 0, sizeof(*arg)); - ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_crypto(ar, arvif, arg); + ath10k_peer_assoc_h_basic(ar, vif, sta, arg); + ath10k_peer_assoc_h_crypto(ar, vif, arg); ath10k_peer_assoc_h_rates(ar, sta, arg); ath10k_peer_assoc_h_ht(ar, sta, arg); ath10k_peer_assoc_h_vht(ar, sta, arg); - ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg); - ath10k_peer_assoc_h_phymode(ar, 
arvif, sta, arg); + ath10k_peer_assoc_h_qos(ar, vif, sta, arg); + ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); return 0; } @@ -1480,6 +1526,9 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, lockdep_assert_held(&ar->conf_mutex); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", + arvif->vdev_id, arvif->bssid, arvif->aid); + rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); @@ -1494,8 +1543,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, * before calling ath10k_setup_peer_smps() which might sleep. */ ht_cap = ap_sta->ht_cap; - ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, - bss_conf, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); @@ -1523,6 +1571,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + WARN_ON(arvif->is_up); + arvif->aid = bss_conf->aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); @@ -1536,9 +1586,6 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, arvif->is_up = true; } -/* - * FIXME: flush TIDs - */ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1548,45 +1595,30 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, lockdep_assert_held(&ar->conf_mutex); - /* - * For some reason, calling VDEV-DOWN before VDEV-STOP - * makes the FW to send frames via HTT after disassociation. - * No idea why this happens, even though VDEV-DOWN is supposed - * to be analogous to link down, so just stop the VDEV. - */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", - arvif->vdev_id); - - /* FIXME: check return value */ - ret = ath10k_vdev_stop(arvif); - - /* - * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and - * report beacons from previously associated network through HTT. - * This in turn would spam mac80211 WARN_ON if we bring down all - * interfaces as it expects there is no rx when no interface is - * running. - */ - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", + arvif->vdev_id, arvif->bssid); - /* FIXME: why don't we print error if wmi call fails? 
*/ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath10k_warn(ar, "faield to down vdev %i: %d\n", + arvif->vdev_id, ret); arvif->def_wep_key_idx = 0; - - arvif->is_started = false; arvif->is_up = false; } -static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, - struct ieee80211_sta *sta, bool reassoc) +static int ath10k_station_assoc(struct ath10k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); + ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); if (ret) { ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -1601,43 +1633,51 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, return ret; } - ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); - if (ret) { - ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", - arvif->vdev_id, ret); - return ret; - } - - if (!sta->wme && !reassoc) { - arvif->num_legacy_stations++; - ret = ath10k_recalc_rtscts_prot(arvif); + /* Re-assoc is run only to update supported rates for given station. It + * doesn't make much sense to reconfigure the peer completely. + */ + if (!reassoc) { + ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, + &sta->ht_cap); if (ret) { - ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } - } - ret = ath10k_install_peer_wep_keys(arvif, sta->addr); - if (ret) { - ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", - arvif->vdev_id, ret); - return ret; - } + ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); + if (ret) { + ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", + sta->addr, arvif->vdev_id, ret); + return ret; + } - ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); - if (ret) { - ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", - sta->addr, arvif->vdev_id, ret); - return ret; + if (!sta->wme) { + arvif->num_legacy_stations++; + ret = ath10k_recalc_rtscts_prot(arvif); + if (ret) { + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + ret = ath10k_install_peer_wep_keys(arvif, sta->addr); + if (ret) { + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } } return ret; } -static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, +static int ath10k_station_disassoc(struct ath10k *ar, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -1729,6 +1769,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) ch->passive = passive; ch->freq = channel->center_freq; + ch->band_center_freq1 = channel->center_freq; ch->min_power = 0; ch->max_power = channel->max_power * 2; ch->max_reg_power = channel->max_reg_power * 2; @@ -1983,6 +2024,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, } } +static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) +{ + /* FIXME: Not really sure since when the behaviour changed. 
At some + * point new firmware stopped requiring creation of peer entries for + * offchannel tx (and actually creating them causes issues with wmi-htc + * tx credit replenishment and reliability). Assuming it's at least 3.4 + * because that's when the `freq` was introduced to TX_FRM HTT command. + */ + return !(ar->htt.target_version_major >= 3 && + ar->htt.target_version_minor >= 4); +} + static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -2158,10 +2211,10 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_IDLE: break; case ATH10K_SCAN_RUNNING: - case ATH10K_SCAN_ABORTING: if (ar->scan.is_roc) ieee80211_remain_on_channel_expired(ar->hw); - else + case ATH10K_SCAN_ABORTING: + if (!ar->scan.is_roc) ieee80211_scan_completed(ar->hw, (ar->scan.state == ATH10K_SCAN_ABORTING)); @@ -2327,23 +2380,28 @@ static void ath10k_tx(struct ieee80211_hw *hw, if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { spin_lock_bh(&ar->data_lock); - ATH10K_SKB_CB(skb)->htt.is_offchan = true; + ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); - ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", - skb); + if (ath10k_mac_need_offchan_tx_work(ar)) { + ATH10K_SKB_CB(skb)->htt.freq = 0; + ATH10K_SKB_CB(skb)->htt.is_offchan = true; - skb_queue_tail(&ar->offchan_tx_queue, skb); - ieee80211_queue_work(hw, &ar->offchan_tx_work); - return; + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); + + skb_queue_tail(&ar->offchan_tx_queue, skb); + ieee80211_queue_work(hw, &ar->offchan_tx_work); + return; + } } ath10k_tx_htt(ar, skb); } /* Must not be called with conf_mutex held as workers can use that also. */ -static void ath10k_drain_tx(struct ath10k *ar) +void ath10k_drain_tx(struct ath10k *ar) { /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); @@ -2376,16 +2434,8 @@ void ath10k_halt(struct ath10k *ar) ath10k_hif_power_down(ar); spin_lock_bh(&ar->data_lock); - list_for_each_entry(arvif, &ar->arvifs, list) { - if (!arvif->beacon) - continue; - - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; - } + list_for_each_entry(arvif, &ar->arvifs, list) + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); } @@ -2408,12 +2458,28 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } +static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) +{ + /* It is not clear that allowing gaps in chainmask + * is helpful. Probably it will not do what user + * is hoping for, so warn in that case. + */ + if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) + return; + + ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", + dbg, cm); +} + static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) { int ret; lockdep_assert_held(&ar->conf_mutex); + ath10k_check_chain_mask(ar, tx_ant, "tx"); + ath10k_check_chain_mask(ar, rx_ant, "rx"); + ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; @@ -2677,12 +2743,68 @@ static void ath10k_config_chan(struct ath10k *ar) ath10k_monitor_recalc(ar); } +static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) +{ + int ret; + u32 param; + + lockdep_assert_held(&ar->conf_mutex); + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); + + param = ar->wmi.pdev_param->txpower_limit2g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", + txpower, ret); + return ret; + } + + param = ar->wmi.pdev_param->txpower_limit5g; + ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); + if (ret) { + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + +static int ath10k_mac_txpower_recalc(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int ret, txpower = -1; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + WARN_ON(arvif->txpower < 0); + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (WARN_ON(txpower == -1)) + return -EINVAL; + + ret = ath10k_mac_txpower_setup(ar, txpower); + if (ret) { + ath10k_warn(ar, "failed to setup tx power %d: %d\n", + txpower, ret); + return ret; + } + + return 0; +} + static int ath10k_config(struct ieee80211_hw *hw, u32 changed) { struct ath10k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; - u32 param; mutex_lock(&ar->conf_mutex); @@ -2706,25 +2828,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n", - hw->conf.power_level); - - param = ar->wmi.pdev_param->txpower_limit2g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", - hw->conf.power_level, ret); - - param = ar->wmi.pdev_param->txpower_limit5g; - ret = ath10k_wmi_pdev_set_param(ar, param, - hw->conf.power_level * 2); - if (ret) - ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", - hw->conf.power_level, ret); - } - if (changed & IEEE80211_CONF_CHANGE_PS) ath10k_config_ps(ar); @@ -2739,6 +2842,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) return ret; } +static u32 get_nss_from_chainmask(u16 chain_mask) +{ + if ((chain_mask & 0x15) == 0x15) + return 4; + else if ((chain_mask & 0x7) == 0x7) + return 3; + else if ((chain_mask & 0x3) == 0x3) + return 2; + return 1; +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@ -2772,9 +2886,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = -EBUSY; goto err; } - bit = ffs(ar->free_vdev_map); + bit = __ffs64(ar->free_vdev_map); - arvif->vdev_id = bit - 1; + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", + bit, ar->free_vdev_map); + + arvif->vdev_id = bit; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; if (ar->p2p) @@ -2804,8 +2921,39 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, break; } - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", - arvif->vdev_id, arvif->vdev_type, 
arvif->vdev_subtype); + /* Some firmware revisions don't wait for beacon tx completion before + * sending another SWBA event. This could lead to hardware using old + * (freed) beacon data in some cases, e.g. tx credit starvation + * combined with missed TBTT. This is very very rare. + * + * On non-IOMMU-enabled hosts this could be a possible security issue + * because hw could beacon some random data on the air. On + * IOMMU-enabled hosts DMAR faults would occur in most cases and target + * device would crash. + * + * Since there are no beacon tx completions (implicit nor explicit) + * propagated to host the only workaround for this is to allocate a + * DMA-coherent buffer for a lifetime of a vif and use it for all + * beacon tx commands. Worst case for this approach is some beacons may + * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. + */ + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_AP) { + arvif->beacon_buf = dma_zalloc_coherent(ar->dev, + IEEE80211_MAX_FRAME_LEN, + &arvif->beacon_paddr, + GFP_ATOMIC); + if (!arvif->beacon_buf) { + ret = -ENOMEM; + ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", + ret); + goto err; + } + } + + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + arvif->beacon_buf ? "single-buf" : "per-skb"); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); @@ -2815,7 +2963,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } - ar->free_vdev_map &= ~(1 << arvif->vdev_id); + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); list_add(&arvif->list, &ar->arvifs); vdev_param = ar->wmi.vdev_param->def_keyid; @@ -2837,6 +2985,20 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_vdev_delete; } + if (ar->cfg_tx_chainmask) { + u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + + vdev_param = ar->wmi.vdev_param->nss; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + nss); + if (ret) { + ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", + arvif->vdev_id, ar->cfg_tx_chainmask, nss, + ret); + goto err_vdev_delete; + } + } + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { @@ -2899,6 +3061,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_peer_delete; } + arvif->txpower = vif->bss_conf.txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) { + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + goto err_peer_delete; + } + mutex_unlock(&ar->conf_mutex); return 0; @@ -2908,10 +3077,16 @@ err_peer_delete: err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); err: + if (arvif->beacon_buf) { + dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, + arvif->beacon_buf, arvif->beacon_paddr); + arvif->beacon_buf = NULL; + } + mutex_unlock(&ar->conf_mutex); return ret; @@ -2924,19 +3099,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret; - mutex_lock(&ar->conf_mutex); - cancel_work_sync(&arvif->wep_key_work); - spin_lock_bh(&ar->data_lock); - if (arvif->beacon) { - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - 
arvif->beacon = NULL; - } + mutex_lock(&ar->conf_mutex); + spin_lock_bh(&ar->data_lock); + ath10k_mac_vif_beacon_cleanup(arvif); spin_unlock_bh(&ar->data_lock); ret = ath10k_spectral_vif_stop(arvif); @@ -2944,7 +3112,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", arvif->vdev_id, ret); - ar->free_vdev_map |= 1 << arvif->vdev_id; + ar->free_vdev_map |= 1LL << arvif->vdev_id; list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { @@ -3068,54 +3236,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, arvif->u.ap.hidden_ssid = info->hidden_ssid; } - /* - * Firmware manages AP self-peer internally so make sure to not create - * it in driver. Otherwise AP self-peer deletion may timeout later. - */ - if (changed & BSS_CHANGED_BSSID && - vif->type != NL80211_IFTYPE_AP) { - if (!is_zero_ether_addr(info->bssid)) { - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d create peer %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_peer_create(ar, arvif->vdev_id, - info->bssid); - if (ret) - ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n", - info->bssid, arvif->vdev_id, ret); - - if (vif->type == NL80211_IFTYPE_STATION) { - /* - * this is never erased as we it for crypto key - * clearing; this is FW requirement - */ - ether_addr_copy(arvif->bssid, info->bssid); - - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d start %pM\n", - arvif->vdev_id, info->bssid); - - ret = ath10k_vdev_start(arvif); - if (ret) { - ath10k_warn(ar, "failed to start vdev %i: %d\n", - arvif->vdev_id, ret); - goto exit; - } - - arvif->is_started = true; - } - - /* - * Mac80211 does not keep IBSS bssid when leaving IBSS, - * so driver need to store it. It is needed when leaving - * IBSS in order to remove BSSID peer. 
- */ - if (vif->type == NL80211_IFTYPE_ADHOC) - memcpy(arvif->bssid, info->bssid, - ETH_ALEN); - } - } + if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); if (changed & BSS_CHANGED_BEACON_ENABLED) ath10k_control_beaconing(arvif, info); @@ -3177,10 +3299,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ath10k_monitor_stop(ar); ath10k_bss_assoc(hw, vif, info); ath10k_monitor_recalc(ar); + } else { + ath10k_bss_disassoc(hw, vif); } } -exit: + if (changed & BSS_CHANGED_TXPOWER) { + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + } + mutex_unlock(&ar->conf_mutex); } @@ -3266,9 +3399,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); } static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, @@ -3453,7 +3587,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", sta->addr); - err = ath10k_station_assoc(ar, arvif, sta, true); + err = ath10k_station_assoc(ar, arvif->vif, sta, true); if (err) ath10k_warn(ar, "failed to reassociate station: %pM\n", sta->addr); @@ -3462,6 +3596,37 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) mutex_unlock(&ar->conf_mutex); } +static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return 0; + + if (ar->num_stations >= ar->max_num_stations) + return -ENOBUFS; + + ar->num_stations++; + + return 0; +} + +static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif) +{ + struct ath10k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_IBSS) + return; + + ar->num_stations--; +} + static int ath10k_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3471,7 +3636,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - int max_num_peers; int ret = 0; if (old_state == IEEE80211_STA_NOTEXIST && @@ -3489,31 +3653,46 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE && - vif->type != NL80211_IFTYPE_STATION) { + new_state == IEEE80211_STA_NONE) { /* * New station addition. 
*/ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1; - else - max_num_peers = TARGET_NUM_PEERS; + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", + arvif->vdev_id, sta->addr, + ar->num_stations + 1, ar->max_num_stations, + ar->num_peers + 1, ar->max_num_peers); - if (ar->num_peers >= max_num_peers) { - ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n", - ar->num_peers, max_num_peers); - ret = -ENOBUFS; + ret = ath10k_mac_inc_num_stations(arvif); + if (ret) { + ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", + ar->max_num_stations); goto exit; } - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac vdev %d peer create %pM (new sta) num_peers %d\n", - arvif->vdev_id, sta->addr, ar->num_peers); - ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); - if (ret) + if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(arvif->is_started); + + ret = ath10k_vdev_start(arvif); + if (ret) { + ath10k_warn(ar, "failed to start vdev %i: %d\n", + arvif->vdev_id, ret); + WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id, + sta->addr)); + ath10k_mac_dec_num_stations(arvif); + goto exit; + } + + arvif->is_started = true; + } } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* @@ -3522,13 +3701,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer delete %pM (sta gone)\n", arvif->vdev_id, sta->addr); + + if (vif->type == NL80211_IFTYPE_STATION) { + WARN_ON(!arvif->is_started); + + ret = ath10k_vdev_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop vdev %i: %d\n", + arvif->vdev_id, ret); + + arvif->is_started = false; + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", sta->addr, arvif->vdev_id, ret); - if (vif->type == NL80211_IFTYPE_STATION) - ath10k_bss_disassoc(hw, vif); + ath10k_mac_dec_num_stations(arvif); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || @@ -3539,7 +3729,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", sta->addr); - ret = ath10k_station_assoc(ar, arvif, sta, false); + ret = ath10k_station_assoc(ar, vif, sta, false); if (ret) ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -3553,7 +3743,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", sta->addr); - ret = ath10k_station_disassoc(ar, arvif, sta); + ret = ath10k_station_disassoc(ar, vif, sta); if (ret) ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); @@ -3717,6 +3907,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, if (ret) goto exit; + duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC); + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3761,10 +3953,11 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); 
ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); + return 0; } @@ -3807,7 +4000,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", arvif->vdev_id, value); - ret = ath10k_mac_set_rts(arvif, value); + ret = ath10k_mac_set_frag(arvif, value); if (ret) { ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n", arvif->vdev_id, ret); @@ -3843,7 +4036,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, empty = (ar->htt.num_pending_tx == 0); spin_unlock_bh(&ar->htt.tx_lock); - skip = (ar->state == ATH10K_STATE_WEDGED); + skip = (ar->state == ATH10K_STATE_WEDGED) || + test_bit(ATH10K_FLAG_CRASH_FLUSH, + &ar->dev_flags); (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); @@ -3929,10 +4124,14 @@ exit: } #endif -static void ath10k_restart_complete(struct ieee80211_hw *hw) +static void ath10k_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) { struct ath10k *ar = hw->priv; + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) + return; + mutex_lock(&ar->conf_mutex); /* If device failed to restart it will be in a different state, e.g. @@ -3940,6 +4139,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw) if (ar->state == ATH10K_STATE_RESTARTED) { ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; + ieee80211_wake_queues(ar->hw); } mutex_unlock(&ar->conf_mutex); @@ -3975,6 +4175,9 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, survey->channel = &sband->channels[idx]; + if (ar->rx_channel == survey->channel) + survey->filled |= SURVEY_INFO_IN_USE; + exit: mutex_unlock(&ar->conf_mutex); return ret; @@ -4022,6 +4225,10 @@ ath10k_default_bitrate_mask(struct ath10k *ar, u32 legacy = 0x00ff; u8 ht = 0xff, i; u16 vht = 0x3ff; + u16 nrf = ar->num_rf_chains; + + if (ar->cfg_tx_chainmask) + nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask); switch (band) { case IEEE80211_BAND_2GHZ: @@ -4037,11 +4244,11 @@ ath10k_default_bitrate_mask(struct ath10k *ar, if (mask->control[band].legacy != legacy) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].ht_mcs[i] != ht) return false; - for (i = 0; i < ar->num_rf_chains; i++) + for (i = 0; i < nrf; i++) if (mask->control[band].vht_mcs[i] != vht) return false; @@ -4292,6 +4499,9 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw, u8 fixed_nss = ar->num_rf_chains; u8 force_sgi; + if (ar->cfg_tx_chainmask) + fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); + force_sgi = mask->control[band].gi; if (force_sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; @@ -4450,12 +4660,15 @@ static const struct ieee80211_ops ath10k_ops = { .tx_last_beacon = ath10k_tx_last_beacon, .set_antenna = ath10k_set_antenna, .get_antenna = ath10k_get_antenna, - .restart_complete = ath10k_restart_complete, + .reconfig_complete = ath10k_reconfig_complete, .get_survey = ath10k_get_survey, .set_bitrate_mask = ath10k_set_bitrate_mask, .sta_rc_update = ath10k_sta_rc_update, .get_tsf = ath10k_get_tsf, .ampdu_action = ath10k_ampdu_action, + .get_et_sset_count = ath10k_debug_get_et_sset_count, + .get_et_stats = ath10k_debug_get_et_stats, + .get_et_strings = ath10k_debug_get_et_strings, CFG80211_TESTMODE_CMD(ath10k_tm_cmd) @@ -4800,15 +5013,6 @@ int ath10k_mac_register(struct ath10k *ar) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); - if 
(test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - /* TODO: Have to deal with 2x2 chips if/when the come out. */ - ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK; - } else { - ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK; - ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK; - } - ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; @@ -4827,10 +5031,6 @@ int ath10k_mac_register(struct ath10k *ar) IEEE80211_HW_AP_LINK_PS | IEEE80211_HW_SPECTRUM_MGMT; - /* MSDU can have HTT TX fragment pushed in front. The additional 4 - * bytes is used for padding/alignment if necessary. */ - ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4; - ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) @@ -4854,6 +5054,8 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 6c80eeada3e2..68296117d203 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -21,6 +21,8 @@ #include <net/mac80211.h> #include "core.h" +#define WEP_KEYID_SHIFT 6 + struct ath10k_generic_iter { struct ath10k *ar; int ret; @@ -39,6 +41,10 @@ void ath10k_offchan_tx_work(struct work_struct *work); void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work); void ath10k_halt(struct ath10k *ar); +void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif); +void ath10k_drain_tx(struct ath10k *ar); +bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr, + u8 keyidx); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 59e0ea83be50..7abb8367119a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -485,6 +485,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; + spin_lock_bh(&ar_pci->ce_lock); + ce_diag = ar_pci->ce_diag; /* @@ -511,7 +513,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); + ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); if (ret != 0) goto done; @@ -527,15 +529,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, - 0); + ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, + 0); if (ret) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { + while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { ret = -EBUSY; @@ -554,9 +556,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } i = 0; - while 
(ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -591,6 +593,8 @@ done: dma_free_coherent(ar->dev, orig_nbytes, data_buf, ce_data_base); + spin_unlock_bh(&ar_pci->ce_lock); + return ret; } @@ -648,6 +652,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; + spin_lock_bh(&ar_pci->ce_lock); + ce_diag = ar_pci->ce_diag; /* @@ -688,7 +694,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) address */ - ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address); + ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); if (ret != 0) goto done; @@ -696,15 +702,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ - ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data, - nbytes, 0, 0); + ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, + nbytes, 0, 0); if (ret != 0) goto done; i = 0; - while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id) != 0) { + while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -724,9 +730,9 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } i = 0; - while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf, - &completed_nbytes, - &id, &flags) != 0) { + while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, + &completed_nbytes, + &id, &flags) != 0) { mdelay(1); if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { @@ -760,6 +766,8 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); + spin_unlock_bh(&ar_pci->ce_lock); + return ret; } @@ -815,20 +823,24 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; - void *transfer_context; + struct sk_buff_head list; + struct sk_buff *skb; u32 ce_data; unsigned int nbytes; unsigned int transfer_id; - while (ath10k_ce_completed_send_next(ce_state, &transfer_context, - &ce_data, &nbytes, - &transfer_id) == 0) { + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data, + &nbytes, &transfer_id) == 0) { /* no need to call tx completion for NULL pointers */ - if (transfer_context == NULL) + if (skb == NULL) continue; - cb->tx_completion(ar, transfer_context, transfer_id); + __skb_queue_tail(&list, skb); } + + while ((skb = __skb_dequeue(&list))) + cb->tx_completion(ar, skb); } /* Called by lower (CE) layer when data is received from the Target. 
*/ @@ -839,12 +851,14 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; struct sk_buff *skb; + struct sk_buff_head list; void *transfer_context; u32 ce_data; unsigned int nbytes, max_nbytes; unsigned int transfer_id; unsigned int flags; + __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, &ce_data, &nbytes, &transfer_id, &flags) == 0) { @@ -861,7 +875,16 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) } skb_put(skb, nbytes); - cb->rx_completion(ar, skb, pipe_info->pipe_num); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", + ce_state->id, skb->len); + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", + skb->data, skb->len); + + cb->rx_completion(ar, skb); } ath10k_pci_rx_post_pipe(pipe_info); @@ -936,6 +959,12 @@ err: return err; } +static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); +} + static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -986,6 +1015,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) spin_lock_bh(&ar->data_lock); + ar->stats.fw_crash_counter++; + crash_data = ath10k_debug_get_new_fw_crash_data(ar); if (crash_data) @@ -1121,14 +1152,37 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static void ath10k_pci_irq_disable(struct ath10k *ar) +static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int i; + u32 val; + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); + val &= ~CORE_CTRL_PCIE_REG_31_MASK; + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); +} +static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); + val |= CORE_CTRL_PCIE_REG_31_MASK; + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); +} + +static void ath10k_pci_irq_disable(struct ath10k *ar) +{ ath10k_ce_disable_interrupts(ar); ath10k_pci_disable_and_clear_legacy_irq(ar); - /* FIXME: How to mask all MSI interrupts? */ + ath10k_pci_irq_msi_fw_mask(ar); +} + +static void ath10k_pci_irq_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i; for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) synchronize_irq(ar_pci->pdev->irq + i); @@ -1138,7 +1192,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar) { ath10k_ce_enable_interrupts(ar); ath10k_pci_enable_legacy_irq(ar); - /* FIXME: How to unmask all MSI interrupts? 
*/ + ath10k_pci_irq_msi_fw_unmask(ar); } static int ath10k_pci_hif_start(struct ath10k *ar) @@ -1151,64 +1205,74 @@ static int ath10k_pci_hif_start(struct ath10k *ar) return 0; } -static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; - struct ath10k_pci *ar_pci; - struct ath10k_ce_pipe *ce_hdl; - u32 buf_sz; - struct sk_buff *netbuf; - u32 ce_data; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - ce_hdl = pipe_info->ce_hdl; + if (!pci_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; - while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, - &ce_data) == 0) { - dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr, - netbuf->len + skb_tailroom(netbuf), + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); - dev_kfree_skb_any(netbuf); + dev_kfree_skb_any(skb); } } -static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ath10k_ce_pipe *ce_hdl; - struct sk_buff *netbuf; - u32 ce_data; - unsigned int nbytes; + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct ce_desc *ce_desc; + struct sk_buff *skb; unsigned int id; - u32 buf_sz; + int i; - buf_sz = pipe_info->buf_sz; + ar = pci_pipe->hif_ce_state; + ar_pci = ath10k_pci_priv(ar); + ce_pipe = pci_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; - /* Unused Copy Engine */ - if (buf_sz == 0) + if (!ce_ring) return; - ar = pipe_info->hif_ce_state; - ar_pci = ath10k_pci_priv(ar); - ce_hdl = pipe_info->ce_hdl; + if (!pci_pipe->buf_sz) + return; - while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, - &ce_data, &nbytes, &id) == 0) { - /* no need to call tx completion for NULL pointers */ - if (!netbuf) + ce_desc = ce_ring->shadow_base; + if (WARN_ON(!ce_desc)) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) continue; - ar_pci->msg_callbacks_current.tx_completion(ar, - netbuf, - id); + ce_ring->per_transfer_context[i] = NULL; + id = MS(__le16_to_cpu(ce_desc[i].flags), + CE_DESC_FLAGS_META_DATA); + + ar_pci->msg_callbacks_current.tx_completion(ar, skb); } } @@ -1266,6 +1330,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_warm_reset(ar); ath10k_pci_irq_disable(ar); + ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); } @@ -1386,6 +1451,9 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) &nbytes, &transfer_id, &flags)) return; + if (WARN_ON_ONCE(!xfer)) + return; + if (!xfer->wait_for_resp) { ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; @@ -1569,23 +1637,40 @@ static int ath10k_pci_init_config(struct ath10k *ar) return 0; } -static int ath10k_pci_alloc_ce(struct ath10k *ar) +static int ath10k_pci_alloc_pipes(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_pci_pipe *pipe; int i, ret; for (i = 0; i < CE_COUNT; i++) { - 
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + pipe = &ar_pci->pipe_info[i]; + pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i], + ath10k_pci_ce_send_done, + ath10k_pci_ce_recv_data); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); return ret; } + + /* Last CE is Diagnostic Window */ + if (i == CE_COUNT - 1) { + ar_pci->ce_diag = pipe->ce_hdl; + continue; + } + + pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); } return 0; } -static void ath10k_pci_free_ce(struct ath10k *ar) +static void ath10k_pci_free_pipes(struct ath10k *ar) { int i; @@ -1593,39 +1678,17 @@ static void ath10k_pci_free_ce(struct ath10k *ar) ath10k_ce_free_pipe(ar, i); } -static int ath10k_pci_ce_init(struct ath10k *ar) +static int ath10k_pci_init_pipes(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_pipe *pipe_info; - const struct ce_attr *attr; - int pipe_num, ret; + int i, ret; - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num]; - pipe_info->pipe_num = pipe_num; - pipe_info->hif_ce_state = ar; - attr = &host_ce_config_wlan[pipe_num]; - - ret = ath10k_ce_init_pipe(ar, pipe_num, attr, - ath10k_pci_ce_send_done, - ath10k_pci_ce_recv_data); + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", - pipe_num, ret); + i, ret); return ret; } - - if (pipe_num == CE_COUNT - 1) { - /* - * Reserve the ultimate CE for - * diagnostic Window support - */ - ar_pci->ce_diag = pipe_info->ce_hdl; - continue; - } - - pipe_info->buf_sz = (size_t)(attr->src_sz_max); } return 0; @@ -1666,93 +1729,167 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) msleep(10); } -static int ath10k_pci_warm_reset(struct ath10k *ar) +static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) { u32 val; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); - - /* debug */ - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", - val); + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - CPU_INTR_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", - val); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); +} - /* disable pending irqs */ - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS, 0); +static void ath10k_pci_warm_reset_ce(struct ath10k *ar) +{ + u32 val; - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CLR_ADDRESS, ~0); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); - msleep(100); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CE_RST_MASK); + msleep(10); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_CE_RST_MASK); +} - /* clear fw indicator */ - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); +static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) +{ + u32 val; - /* clear target LF timer interrupts */ val = 
ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS, val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); +} - /* reset CE */ - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val | SOC_RESET_CONTROL_CE_RST_MASK); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - msleep(10); +static int ath10k_pci_warm_reset(struct ath10k *ar) +{ + int ret; - /* unreset CE */ - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val & ~SOC_RESET_CONTROL_CE_RST_MASK); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - msleep(10); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); + + spin_lock_bh(&ar->data_lock); + ar->stats.fw_warm_reset_counter++; + spin_unlock_bh(&ar->data_lock); + ath10k_pci_irq_disable(ar); + + /* Make sure the target CPU is not doing anything dangerous, e.g. if it + * were to access copy engine while host performs copy engine reset + * then it is possible for the device to confuse pci-e controller to + * the point of bringing host system to a complete stop (i.e. hang). + */ ath10k_pci_warm_reset_si0(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); + ath10k_pci_wait_for_target_init(ar); - /* debug */ - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", - val); + ath10k_pci_warm_reset_clear_lf(ar); + ath10k_pci_warm_reset_ce(ar); + ath10k_pci_warm_reset_cpu(ar); + ath10k_pci_init_pipes(ar); - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + - CPU_INTR_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", - val); + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target init: %d\n", ret); + return ret; + } - /* CPU warm reset */ - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, - val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); - val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + - SOC_RESET_CONTROL_ADDRESS); - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", - val); + return 0; +} - msleep(100); +static int ath10k_pci_chip_reset(struct ath10k *ar) +{ + int i, ret; + u32 val; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n"); + + /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. + * It is thus preferred to use warm reset which is safer but may not be + * able to recover the device from all possible fail scenarios. + * + * Warm reset doesn't always work on first try so attempt it a few + * times before giving up. + */ + for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { + ret = ath10k_pci_warm_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", + i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, + ret); + continue; + } + + /* FIXME: Sometimes copy engine doesn't recover after warm + * reset. In most cases this needs cold reset. In some of these + * cases the device is in such a state that a cold reset may + * lock up the host. 
+ * + * Reading any host interest register via copy engine is + * sufficient to verify if device is capable of booting + * firmware blob. + */ + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_warn(ar, "failed to init copy engine: %d\n", + ret); + continue; + } + + ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, + &val); + if (ret) { + ath10k_warn(ar, "failed to poke copy engine: %d\n", + ret); + continue; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); + return 0; + } + + if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { + ath10k_warn(ar, "refusing cold reset as requested\n"); + return -EPERM; + } + + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n"); return 0; } -static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) +static int ath10k_pci_hif_power_up(struct ath10k *ar) { int ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up target: %d\n", ret); + return ret; + } + /* * Bring the target up cleanly. * @@ -1763,26 +1900,16 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) * is in an unexpected state. We try to catch that here in order to * reset the Target and retry the probe. */ - if (cold_reset) - ret = ath10k_pci_cold_reset(ar); - else - ret = ath10k_pci_warm_reset(ar); - + ret = ath10k_pci_chip_reset(ar); if (ret) { - ath10k_err(ar, "failed to reset target: %d\n", ret); - goto err; + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto err_sleep; } - ret = ath10k_pci_ce_init(ar); + ret = ath10k_pci_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); - goto err; - } - - ret = ath10k_pci_wait_for_target_init(ar); - if (ret) { - ath10k_err(ar, "failed to wait for target to init: %d\n", ret); - goto err_ce; + goto err_sleep; } ret = ath10k_pci_init_config(ar); @@ -1801,73 +1928,21 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) err_ce: ath10k_pci_ce_deinit(ar); - ath10k_pci_warm_reset(ar); -err: - return ret; -} - -static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) -{ - int i, ret; - - /* - * Sometime warm reset succeeds after retries. - * - * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work - * at first try. - */ - for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { - ret = __ath10k_pci_hif_power_up(ar, false); - if (ret == 0) - break; - - ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n", - i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); - } +err_sleep: + ath10k_pci_sleep(ar); return ret; } -static int ath10k_pci_hif_power_up(struct ath10k *ar) -{ - int ret; - - ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); - - /* - * Hardware CUS232 version 2 has some issues with cold reset and the - * preferred (and safer) way to perform a device reset is through a - * warm reset. - * - * Warm reset doesn't always work though so fall back to cold reset may - * be necessary. 
- */ - ret = ath10k_pci_hif_power_up_warm(ar); - if (ret) { - ath10k_warn(ar, "failed to power up target using warm reset: %d\n", - ret); - - if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) - return ret; - - ath10k_warn(ar, "trying cold reset\n"); - - ret = __ath10k_pci_hif_power_up(ar, true); - if (ret) { - ath10k_err(ar, "failed to power up target using cold reset too (%d)\n", - ret); - return ret; - } - } - - return 0; -} - static void ath10k_pci_hif_power_down(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); - ath10k_pci_warm_reset(ar); + /* Currently hif_power_up performs effectively a reset and hif_stop + * resets the chip as well so there's no point in resetting here. + */ + + ath10k_pci_sleep(ar); } #ifdef CONFIG_PM @@ -1921,6 +1996,8 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, .start = ath10k_pci_hif_start, .stop = ath10k_pci_hif_stop, @@ -1931,6 +2008,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, .power_up = ath10k_pci_hif_power_up, .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_pci_read32, + .write32 = ath10k_pci_write32, #ifdef CONFIG_PM .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, @@ -2250,14 +2329,14 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (ar_pci->num_msi_intrs == 0) /* Fix potential race by repeating CORE_BASE writes */ - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + - PCIE_INTR_ENABLE_ADDRESS, - PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL); + ath10k_pci_enable_legacy_irq(ar); mdelay(10); } while (time_before(jiffies, timeout)); + ath10k_pci_disable_and_clear_legacy_irq(ar); + ath10k_pci_irq_msi_fw_mask(ar); + if (val == 0xffffffff) { ath10k_err(ar, "failed to read device register, device is gone\n"); return -EIO; @@ -2287,6 +2366,12 @@ static int ath10k_pci_cold_reset(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); + spin_lock_bh(&ar->data_lock); + + ar->stats.fw_cold_reset_counter++; + + spin_unlock_bh(&ar->data_lock); + /* Put Target, including PCIe, into RESET. */ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); val |= 1; @@ -2400,6 +2485,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, u32 chip_id; ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, + ATH10K_BUS_PCI, &ath10k_pci_hif_ops); if (!ar) { dev_err(&pdev->dev, "failed to allocate core\n"); @@ -2435,7 +2521,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_sleep; } - ret = ath10k_pci_alloc_ce(ar); + ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); @@ -2443,25 +2529,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev, } ath10k_pci_ce_deinit(ar); - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) { - ath10k_err(ar, "failed to disable copy engine interrupts: %d\n", - ret); - goto err_free_ce; - } - - /* Workaround: There's no known way to mask all possible interrupts via - * device CSR. The only way to make sure device doesn't assert - * interrupts is to reset it. Interrupts are then disabled on host - * after handlers are registered. 
- */ - ath10k_pci_warm_reset(ar); + ath10k_pci_irq_disable(ar); ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); - goto err_free_ce; + goto err_free_pipes; } ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", @@ -2474,8 +2547,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_deinit_irq; } - /* This shouldn't race as the device has been reset above. */ - ath10k_pci_irq_disable(ar); + ath10k_pci_sleep(ar); ret = ath10k_core_register(ar, chip_id); if (ret) { @@ -2492,8 +2564,8 @@ err_free_irq: err_deinit_irq: ath10k_pci_deinit_irq(ar); -err_free_ce: - ath10k_pci_free_ce(ar); +err_free_pipes: + ath10k_pci_free_pipes(ar); err_sleep: ath10k_pci_sleep(ar); @@ -2527,8 +2599,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_kill_tasklet(ar); ath10k_pci_deinit_irq(ar); ath10k_pci_ce_deinit(ar); - ath10k_pci_free_ce(ar); - ath10k_pci_sleep(ar); + ath10k_pci_free_pipes(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); } @@ -2565,5 +2636,7 @@ module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 3e1454b74e00..63ce61fcdac8 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -56,14 +56,14 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len, } int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf) { struct fft_sample_ath10k *fft_sample; u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS]; u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag; - u32 reg0, reg1, nf_list1, nf_list2; + u32 reg0, reg1; u8 chain_idx, *bins; int dc_pos; @@ -82,7 +82,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, /* TODO: there might be a reason why the hardware reports 20/40/80 MHz, * but the results/plots suggest that its actually 22/44/88 MHz. 
*/ - switch (event->hdr.chan_width_mhz) { + switch (phyerr->chan_width_mhz) { case 20: fft_sample->chan_width_mhz = 22; break; @@ -101,7 +101,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar, fft_sample->chan_width_mhz = 88; break; default: - fft_sample->chan_width_mhz = event->hdr.chan_width_mhz; + fft_sample->chan_width_mhz = phyerr->chan_width_mhz; } fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB); @@ -110,36 +110,22 @@ int ath10k_spectral_process_fft(struct ath10k *ar, peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); fft_sample->max_magnitude = __cpu_to_be16(peak_mag); fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX); - fft_sample->rssi = event->hdr.rssi_combined; + fft_sample->rssi = phyerr->rssi_combined; total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB); base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB); fft_sample->total_gain_db = __cpu_to_be16(total_gain_db); fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db); - freq1 = __le16_to_cpu(event->hdr.freq1); - freq2 = __le16_to_cpu(event->hdr.freq2); + freq1 = __le16_to_cpu(phyerr->freq1); + freq2 = __le16_to_cpu(phyerr->freq2); fft_sample->freq1 = __cpu_to_be16(freq1); fft_sample->freq2 = __cpu_to_be16(freq2); - nf_list1 = __le32_to_cpu(event->hdr.nf_list_1); - nf_list2 = __le32_to_cpu(event->hdr.nf_list_2); chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX); - switch (chain_idx) { - case 0: - fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu); - break; - case 1: - fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu); - break; - case 2: - fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu); - break; - case 3: - fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu); - break; - } + fft_sample->noise = __cpu_to_be16( + __le16_to_cpu(phyerr->nf_chains[chain_idx])); bins = (u8 *)fftr; bins += sizeof(*fftr); diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h index ddc57c557272..042f5b302c75 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.h +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -47,8 +47,8 @@ enum ath10k_spectral_mode { #ifdef CONFIG_ATH10K_DEBUGFS int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf); int ath10k_spectral_start(struct ath10k *ar); int ath10k_spectral_vif_stop(struct ath10k_vif *arvif); @@ -59,8 +59,8 @@ void ath10k_spectral_destroy(struct ath10k *ar); static inline int ath10k_spectral_process_fft(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, size_t bin_len, u64 tsf) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 574b75ab2609..b289378b6e3e 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -20,6 +20,15 @@ #include <linux/tracepoint.h> #include "core.h" +#if !defined(_TRACE_H_) +static inline u32 ath10k_frm_hdr_len(const void *buf) +{ + const struct ieee80211_hdr *hdr = buf; + + return ieee80211_hdrlen(hdr->frame_control); +} +#endif + #define _TRACE_H_ /* create empty functions when tracing is disabled */ @@ -138,7 +147,8 @@ TRACE_EVENT(ath10k_log_dbg_dump, ); TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(struct ath10k *ar, int id, void *buf, size_t 
buf_len, int ret), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len, + int ret), TP_ARGS(ar, id, buf, buf_len, ret), @@ -171,7 +181,7 @@ TRACE_EVENT(ath10k_wmi_cmd, ); TRACE_EVENT(ath10k_wmi_event, - TP_PROTO(struct ath10k *ar, int id, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len), TP_ARGS(ar, id, buf, buf_len), @@ -201,7 +211,7 @@ TRACE_EVENT(ath10k_wmi_event, ); TRACE_EVENT(ath10k_htt_stats, - TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), TP_ARGS(ar, buf, buf_len), @@ -228,7 +238,7 @@ TRACE_EVENT(ath10k_htt_stats, ); TRACE_EVENT(ath10k_wmi_dbglog, - TP_PROTO(struct ath10k *ar, void *buf, size_t buf_len), + TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len), TP_ARGS(ar, buf, buf_len), @@ -254,6 +264,195 @@ TRACE_EVENT(ath10k_wmi_dbglog, ) ); +TRACE_EVENT(ath10k_htt_pktlog, + TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len), + + TP_ARGS(ar, buf, buf_len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, buf_len) + __dynamic_array(u8, pktlog, buf_len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(pktlog), buf, buf_len); + ), + + TP_printk( + "%s %s size %hu", + __get_str(driver), + __get_str(device), + __entry->buf_len + ) +); + +TRACE_EVENT(ath10k_htt_tx, + TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len, + u8 vdev_id, u8 tid), + + TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + __field(u16, msdu_len) + __field(u8, vdev_id) + __field(u8, tid) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->msdu_id = msdu_id; + __entry->msdu_len = msdu_len; + __entry->vdev_id = vdev_id; + __entry->tid = tid; + ), + + TP_printk( + "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id, + __entry->msdu_len, + __entry->vdev_id, + __entry->tid + ) +); + +TRACE_EVENT(ath10k_txrx_tx_unref, + TP_PROTO(struct ath10k *ar, u16 msdu_id), + + TP_ARGS(ar, msdu_id), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, msdu_id) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->msdu_id = msdu_id; + ), + + TP_printk( + "%s %s msdu_id %d", + __get_str(driver), + __get_str(device), + __entry->msdu_id + ) +); + +DECLARE_EVENT_CLASS(ath10k_hdr_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, data, ath10k_frm_hdr_len(data)) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = ath10k_frm_hdr_len(data); + memcpy(__get_dynamic_array(data), data, __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +DECLARE_EVENT_CLASS(ath10k_payload_event, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + 
+ TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(size_t, len) + __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data))) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = len - ath10k_frm_hdr_len(data); + memcpy(__get_dynamic_array(payload), + data + ath10k_frm_hdr_len(data), __entry->len); + ), + + TP_printk( + "%s %s len %zu\n", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + TP_ARGS(ar, data, len) +); + +TRACE_EVENT(ath10k_htt_rx_desc, + TP_PROTO(struct ath10k *ar, const void *data, size_t len), + + TP_ARGS(ar, data, len), + + TP_STRUCT__entry( + __string(device, dev_name(ar->dev)) + __string(driver, dev_driver_string(ar->dev)) + __field(u16, len) + __dynamic_array(u8, rxdesc, len) + ), + + TP_fast_assign( + __assign_str(device, dev_name(ar->dev)); + __assign_str(driver, dev_driver_string(ar->dev)); + __entry->len = len; + memcpy(__get_dynamic_array(rxdesc), data, len); + ), + + TP_printk( + "%s %s rxdesc len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index a0cbc21d0d4b..7579de8e7a8c 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -78,6 +78,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); + trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id); if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); @@ -145,7 +146,8 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, mapped = !!ath10k_peer_find(ar, vdev_id, addr); spin_unlock_bh(&ar->data_lock); - mapped == expect_mapped; + (mapped == expect_mapped || + test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); }), 3*HZ); if (ret <= 0) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2c42bd504b79..c0f3e4d09263 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -609,6 +609,40 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, }; +static void +ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, + const struct wmi_channel_arg *arg) +{ + u32 flags = 0; + + memset(ch, 0, sizeof(*ch)); + + if (arg->passive) + flags |= WMI_CHAN_FLAG_PASSIVE; + if (arg->allow_ibss) + flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; + if (arg->allow_ht) + flags |= WMI_CHAN_FLAG_ALLOW_HT; + if (arg->allow_vht) + flags |= WMI_CHAN_FLAG_ALLOW_VHT; + if (arg->ht40plus) + flags |= WMI_CHAN_FLAG_HT40_PLUS; + if (arg->chan_radar) + flags |= WMI_CHAN_FLAG_DFS; + + ch->mhz = __cpu_to_le32(arg->freq); + ch->band_center_freq1 = 
__cpu_to_le32(arg->band_center_freq1); + ch->band_center_freq2 = 0; + ch->min_power = arg->min_power; + ch->max_power = arg->max_power; + ch->reg_power = arg->max_reg_power; + ch->antenna_max = arg->max_antenna_gain; + + /* mode & flags share storage */ + ch->mode = arg->mode; + ch->flags |= __cpu_to_le32(flags); +} + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { int ret; @@ -745,6 +779,10 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) ath10k_wmi_tx_beacons_nowait(ar); ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); + + if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ret = -ESHUTDOWN; + (ret != -EAGAIN); }), 3*HZ); @@ -800,6 +838,8 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); + trace_ath10k_tx_hdr(ar, skb->data, skb->len); + trace_ath10k_tx_payload(ar, skb->data, skb->len); /* Send the management frame buffer to the target */ ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); @@ -1073,13 +1113,46 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band) return rate_idx; } +/* If keys are configured, HW decrypts all frames + * with protected bit set. Mark such frames as decrypted. + */ +static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar, + struct sk_buff *skb, + struct ieee80211_rx_status *status) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + unsigned int hdrlen; + bool peer_key; + u8 *addr, keyidx; + + if (!ieee80211_is_auth(hdr->frame_control) || + !ieee80211_has_protected(hdr->frame_control)) + return; + + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN)) + return; + + keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT; + addr = ieee80211_get_SA(hdr); + + spin_lock_bh(&ar->data_lock); + peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx); + spin_unlock_bh(&ar->data_lock); + + if (peer_key) { + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac wep key present for peer %pM\n", addr); + status->flag |= RX_FLAG_DECRYPTED; + } +} + static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_event_v1 *ev_v1; struct wmi_mgmt_rx_event_v2 *ev_v2; struct wmi_mgmt_rx_hdr_v1 *ev_hdr; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_channel *ch; struct ieee80211_hdr *hdr; u32 rx_status; u32 channel; @@ -1127,30 +1200,34 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } - if (rx_status & WMI_RX_STATUS_ERR_CRC) - status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_status & WMI_RX_STATUS_ERR_CRC) { + dev_kfree_skb(skb); + return 0; + } + if (rx_status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - /* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to + /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to * MODE_11B. This means phy_mode is not a reliable source for the band - * of mgmt rx. */ - - ch = ar->scan_channel; - if (!ch) - ch = ar->rx_channel; - - if (ch) { - status->band = ch->band; - - if (phy_mode == MODE_11B && - status->band == IEEE80211_BAND_5GHZ) - ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + * of mgmt rx. 
+ */ + if (channel >= 1 && channel <= 14) { + status->band = IEEE80211_BAND_2GHZ; + } else if (channel >= 36 && channel <= 165) { + status->band = IEEE80211_BAND_5GHZ; } else { - ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n"); - status->band = phy_mode_to_band(phy_mode); + /* Shouldn't happen unless list of advertised channels to + * mac80211 has been changed. + */ + WARN_ON_ONCE(1); + dev_kfree_skb(skb); + return 0; } + if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) + ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + status->freq = ieee80211_channel_to_frequency(channel, status->band); status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; status->rate_idx = get_rate_idx(rate, status->band); @@ -1160,6 +1237,8 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); + ath10k_wmi_handle_wep_reauth(ar, skb, status); + /* FW delivers WEP Shared Auth frame with Protected Bit set and * encrypted payload. However in case of PMF it delivers decrypted * frames with Protected Bit set. */ @@ -1295,14 +1374,196 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) return 0; } +static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src, + struct ath10k_fw_stats_pdev *dst) +{ + const struct wal_dbg_tx_stats *tx = &src->wal.tx; + const struct wal_dbg_rx_stats *rx = &src->wal.rx; + + dst->ch_noise_floor = __le32_to_cpu(src->chan_nf); + dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); + dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); + dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); + dst->cycle_count = __le32_to_cpu(src->cycle_count); + dst->phy_err_count = __le32_to_cpu(src->phy_err_count); + dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); + + dst->comp_queued = __le32_to_cpu(tx->comp_queued); + dst->comp_delivered = __le32_to_cpu(tx->comp_delivered); + dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued); + dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued); + dst->wmm_drop = __le32_to_cpu(tx->wmm_drop); + dst->local_enqued = __le32_to_cpu(tx->local_enqued); + dst->local_freed = __le32_to_cpu(tx->local_freed); + dst->hw_queued = __le32_to_cpu(tx->hw_queued); + dst->hw_reaped = __le32_to_cpu(tx->hw_reaped); + dst->underrun = __le32_to_cpu(tx->underrun); + dst->tx_abort = __le32_to_cpu(tx->tx_abort); + dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed); + dst->tx_ko = __le32_to_cpu(tx->tx_ko); + dst->data_rc = __le32_to_cpu(tx->data_rc); + dst->self_triggers = __le32_to_cpu(tx->self_triggers); + dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure); + dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err); + dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry); + dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout); + dst->pdev_resets = __le32_to_cpu(tx->pdev_resets); + dst->phy_underrun = __le32_to_cpu(tx->phy_underrun); + dst->txop_ovf = __le32_to_cpu(tx->txop_ovf); + + dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change); + dst->status_rcvd = __le32_to_cpu(rx->status_rcvd); + dst->r0_frags = __le32_to_cpu(rx->r0_frags); + dst->r1_frags = __le32_to_cpu(rx->r1_frags); + dst->r2_frags = __le32_to_cpu(rx->r2_frags); + dst->r3_frags = __le32_to_cpu(rx->r3_frags); + dst->htt_msdus = __le32_to_cpu(rx->htt_msdus); + dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus); + dst->loc_msdus = __le32_to_cpu(rx->loc_msdus); + dst->loc_mpdus = 
__le32_to_cpu(rx->loc_mpdus); + dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu); + dst->phy_errs = __le32_to_cpu(rx->phy_errs); + dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop); + dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs); +} + +static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src, + struct ath10k_fw_stats_peer *dst) +{ + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->peer_rssi = __le32_to_cpu(src->peer_rssi); + dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate); +} + +static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats(src, dst); + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(src, dst); + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar, + struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + const struct wmi_stats_event *ev = (void *)skb->data; + u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + int i; + + if (!skb_pull(skb, sizeof(*ev))) + return -EPROTO; + + num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); + num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); + num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + + for (i = 0; i < num_pdev_stats; i++) { + const struct wmi_10x_pdev_stats *src; + struct ath10k_fw_stats_pdev *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_pdev_stats(&src->old, dst); + + dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad); + dst->rts_bad = __le32_to_cpu(src->rts_bad); + dst->rts_good = __le32_to_cpu(src->rts_good); + dst->fcs_bad = __le32_to_cpu(src->fcs_bad); + dst->no_beacons = __le32_to_cpu(src->no_beacons); + dst->mib_int_count = __le32_to_cpu(src->mib_int_count); + + list_add_tail(&dst->list, &stats->pdevs); + } + + /* fw doesn't implement vdev stats */ + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10x_peer_stats *src; + struct ath10k_fw_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ath10k_wmi_pull_peer_stats(&src->old, dst); + + dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + + list_add_tail(&dst->list, &stats->peers); + } + + return 0; +} + +int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + 
return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats); + else + return ath10k_wmi_main_pull_fw_stats(ar, skb, stats); +} + static void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; - ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); - - ath10k_debug_read_target_stats(ar, ev); + ath10k_debug_fw_stats_process(ar, skb); } static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, @@ -1579,6 +1840,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) struct wmi_bcn_info *bcn_info; struct ath10k_vif *arvif; struct sk_buff *bcn; + dma_addr_t paddr; int ret, vdev_id = 0; ev = (struct wmi_host_swba_event *)skb->data; @@ -1647,27 +1909,37 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ath10k_warn(ar, "SWBA overrun on vdev %d\n", arvif->vdev_id); - dma_unmap_single(arvif->ar->dev, - ATH10K_SKB_CB(arvif->beacon)->paddr, - arvif->beacon->len, DMA_TO_DEVICE); - dev_kfree_skb_any(arvif->beacon); - arvif->beacon = NULL; + ath10k_mac_vif_beacon_free(arvif); } - ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev, - bcn->data, bcn->len, - DMA_TO_DEVICE); - ret = dma_mapping_error(arvif->ar->dev, - ATH10K_SKB_CB(bcn)->paddr); - if (ret) { - ath10k_warn(ar, "failed to map beacon: %d\n", ret); - dev_kfree_skb_any(bcn); - goto skip; + if (!arvif->beacon_buf) { + paddr = dma_map_single(arvif->ar->dev, bcn->data, + bcn->len, DMA_TO_DEVICE); + ret = dma_mapping_error(arvif->ar->dev, paddr); + if (ret) { + ath10k_warn(ar, "failed to map beacon: %d\n", + ret); + dev_kfree_skb_any(bcn); + goto skip; + } + + ATH10K_SKB_CB(bcn)->paddr = paddr; + } else { + if (bcn->len > IEEE80211_MAX_FRAME_LEN) { + ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n", + bcn->len, IEEE80211_MAX_FRAME_LEN); + skb_trim(bcn, IEEE80211_MAX_FRAME_LEN); + } + memcpy(arvif->beacon_buf, bcn->data, bcn->len); + ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr; } arvif->beacon = bcn; arvif->beacon_sent = false; + trace_ath10k_tx_hdr(ar, bcn->data, bcn->len); + trace_ath10k_tx_payload(ar, bcn->data, bcn->len); + ath10k_wmi_tx_beacon_nowait(arvif); skip: spin_unlock_bh(&ar->data_lock); @@ -1681,8 +1953,8 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, } static void ath10k_dfs_radar_report(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_radar_report *rr, + const struct wmi_phyerr *phyerr, + const struct phyerr_radar_report *rr, u64 tsf) { u32 reg0, reg1, tsf32l; @@ -1715,12 +1987,12 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, return; /* report event to DFS pattern detector */ - tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp); + tsf32l = __le32_to_cpu(phyerr->tsf_timestamp); tsf64 = tsf & (~0xFFFFFFFFULL); tsf64 |= tsf32l; width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR); - rssi = event->hdr.rssi_combined; + rssi = phyerr->rssi_combined; /* hardware store this as 8 bit signed value, * set to zero if negative number @@ -1759,8 +2031,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, } static int ath10k_dfs_fft_report(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, - struct phyerr_fft_report *fftr, + const struct wmi_phyerr *phyerr, + const struct phyerr_fft_report *fftr, u64 tsf) { u32 reg0, reg1; @@ -1768,7 +2040,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, reg0 = __le32_to_cpu(fftr->reg0); reg1 = __le32_to_cpu(fftr->reg1); - rssi = event->hdr.rssi_combined; + rssi = 
phyerr->rssi_combined; ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", @@ -1797,20 +2069,20 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, } static void ath10k_wmi_event_dfs(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, + const struct wmi_phyerr *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; - struct phyerr_tlv *tlv; - struct phyerr_radar_report *rr; - struct phyerr_fft_report *fftr; - u8 *tlv_buf; + const struct phyerr_tlv *tlv; + const struct phyerr_radar_report *rr; + const struct phyerr_fft_report *fftr; + const u8 *tlv_buf; - buf_len = __le32_to_cpu(event->hdr.buf_len); + buf_len = __le32_to_cpu(phyerr->buf_len); ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", - event->hdr.phy_err_code, event->hdr.rssi_combined, - __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); + phyerr->phy_err_code, phyerr->rssi_combined, + __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len); /* Skip event if DFS disabled */ if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) @@ -1825,9 +2097,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, return; } - tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; tlv_len = __le16_to_cpu(tlv->len); - tlv_buf = &event->bufp[i + sizeof(*tlv)]; + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", tlv_len, tlv->tag, tlv->sig); @@ -1841,7 +2113,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, } rr = (struct phyerr_radar_report *)tlv_buf; - ath10k_dfs_radar_report(ar, event, rr, tsf); + ath10k_dfs_radar_report(ar, phyerr, rr, tsf); break; case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { @@ -1851,7 +2123,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, } fftr = (struct phyerr_fft_report *)tlv_buf; - res = ath10k_dfs_fft_report(ar, event, fftr, tsf); + res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf); if (res) return; break; @@ -1863,16 +2135,16 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, - struct wmi_single_phyerr_rx_event *event, + const struct wmi_phyerr *phyerr, u64 tsf) { int buf_len, tlv_len, res, i = 0; struct phyerr_tlv *tlv; - u8 *tlv_buf; - struct phyerr_fft_report *fftr; + const void *tlv_buf; + const struct phyerr_fft_report *fftr; size_t fftr_len; - buf_len = __le32_to_cpu(event->hdr.buf_len); + buf_len = __le32_to_cpu(phyerr->buf_len); while (i < buf_len) { if (i + sizeof(*tlv) > buf_len) { @@ -1881,9 +2153,9 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, return; } - tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv = (struct phyerr_tlv *)&phyerr->buf[i]; tlv_len = __le16_to_cpu(tlv->len); - tlv_buf = &event->bufp[i + sizeof(*tlv)]; + tlv_buf = &phyerr->buf[i + sizeof(*tlv)]; if (i + sizeof(*tlv) + tlv_len > buf_len) { ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", @@ -1900,8 +2172,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, } fftr_len = tlv_len - sizeof(*fftr); - fftr = (struct phyerr_fft_report *)tlv_buf; - res = ath10k_spectral_process_fft(ar, event, + fftr = tlv_buf; + res = ath10k_spectral_process_fft(ar, phyerr, fftr, fftr_len, tsf); if (res < 0) { @@ -1918,8 +2190,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar, static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) { - 
struct wmi_comb_phyerr_rx_event *comb_event; - struct wmi_single_phyerr_rx_event *event; + const struct wmi_phyerr_event *ev; + const struct wmi_phyerr *phyerr; u32 count, i, buf_len, phy_err_code; u64 tsf; int left_len = skb->len; @@ -1927,38 +2199,38 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) ATH10K_DFS_STAT_INC(ar, phy_errors); /* Check if combined event available */ - if (left_len < sizeof(*comb_event)) { + if (left_len < sizeof(*ev)) { ath10k_warn(ar, "wmi phyerr combined event wrong len\n"); return; } - left_len -= sizeof(*comb_event); + left_len -= sizeof(*ev); /* Check number of included events */ - comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data; - count = __le32_to_cpu(comb_event->hdr.num_phyerr_events); + ev = (const struct wmi_phyerr_event *)skb->data; + count = __le32_to_cpu(ev->num_phyerrs); - tsf = __le32_to_cpu(comb_event->hdr.tsf_u32); + tsf = __le32_to_cpu(ev->tsf_u32); tsf <<= 32; - tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); + tsf |= __le32_to_cpu(ev->tsf_l32); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event phyerr count %d tsf64 0x%llX\n", count, tsf); - event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp; + phyerr = ev->phyerrs; for (i = 0; i < count; i++) { /* Check if we can read event header */ - if (left_len < sizeof(*event)) { + if (left_len < sizeof(*phyerr)) { ath10k_warn(ar, "single event (%d) wrong head len\n", i); return; } - left_len -= sizeof(*event); + left_len -= sizeof(*phyerr); - buf_len = __le32_to_cpu(event->hdr.buf_len); - phy_err_code = event->hdr.phy_err_code; + buf_len = __le32_to_cpu(phyerr->buf_len); + phy_err_code = phyerr->phy_err_code; if (left_len < buf_len) { ath10k_warn(ar, "single event (%d) wrong buf len\n", i); @@ -1969,20 +2241,20 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) switch (phy_err_code) { case PHY_ERROR_RADAR: - ath10k_wmi_event_dfs(ar, event, tsf); + ath10k_wmi_event_dfs(ar, phyerr, tsf); break; case PHY_ERROR_SPECTRAL_SCAN: - ath10k_wmi_event_spectral_scan(ar, event, tsf); + ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); break; case PHY_ERROR_FALSE_RADAR_EXT: - ath10k_wmi_event_dfs(ar, event, tsf); - ath10k_wmi_event_spectral_scan(ar, event, tsf); + ath10k_wmi_event_dfs(ar, phyerr, tsf); + ath10k_wmi_event_spectral_scan(ar, phyerr, tsf); break; default: break; } - event += sizeof(*event) + buf_len; + phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len; } } @@ -2028,7 +2300,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, /* the last byte is always reserved for the null character */ buf[i] = '\0'; - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); + ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf); } static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) @@ -2163,30 +2435,117 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, return 0; } -static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, - struct sk_buff *skb) +static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + struct wmi_service_ready_event *ev; + size_t i, n; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + ev = (void *)skb->data; + skb_pull(skb, sizeof(*ev)); + arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = ev->ht_cap_info; + arg->vht_cap = ev->vht_cap_info; + arg->sw_ver0 = ev->sw_version; + arg->sw_ver1 = ev->sw_version_1; + arg->phy_capab = ev->phy_capability; + 
arg->num_rf_chains = ev->num_rf_chains; + arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->num_mem_reqs = ev->num_mem_reqs; + arg->service_map = ev->wmi_service_bitmap; + arg->service_map_len = sizeof(ev->wmi_service_bitmap); + + n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), + ARRAY_SIZE(arg->mem_reqs)); + for (i = 0; i < n; i++) + arg->mem_reqs[i] = &ev->mem_reqs[i]; + + if (skb->len < + __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) + return -EPROTO; + + return 0; +} + +static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb, + struct wmi_svc_rdy_ev_arg *arg) +{ + struct wmi_10x_service_ready_event *ev; + int i, n; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + ev = (void *)skb->data; + skb_pull(skb, sizeof(*ev)); + arg->min_tx_power = ev->hw_min_tx_power; + arg->max_tx_power = ev->hw_max_tx_power; + arg->ht_cap = ev->ht_cap_info; + arg->vht_cap = ev->vht_cap_info; + arg->sw_ver0 = ev->sw_version; + arg->phy_capab = ev->phy_capability; + arg->num_rf_chains = ev->num_rf_chains; + arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd; + arg->num_mem_reqs = ev->num_mem_reqs; + arg->service_map = ev->wmi_service_bitmap; + arg->service_map_len = sizeof(ev->wmi_service_bitmap); + + n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs), + ARRAY_SIZE(arg->mem_reqs)); + for (i = 0; i < n; i++) + arg->mem_reqs[i] = &ev->mem_reqs[i]; + + if (skb->len < + __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0])) + return -EPROTO; + + return 0; +} + +static void ath10k_wmi_event_service_ready(struct ath10k *ar, + struct sk_buff *skb) { - struct wmi_service_ready_event *ev = (void *)skb->data; - DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; + struct wmi_svc_rdy_ev_arg arg = {}; + u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; + int ret; - if (skb->len < sizeof(*ev)) { - ath10k_warn(ar, "Service ready event was %d B but expected %zu B. 
Wrong firmware version?\n", - skb->len, sizeof(*ev)); + memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg); + wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map, + arg.service_map_len); + } else { + ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg); + wmi_main_svc_map(arg.service_map, ar->wmi.svc_map, + arg.service_map_len); + } + + if (ret) { + ath10k_warn(ar, "failed to parse service ready: %d\n", ret); return; } - ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); - ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); - ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); - ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); + ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power); + ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power); + ar->ht_cap_info = __le32_to_cpu(arg.ht_cap); + ar->vht_cap_info = __le32_to_cpu(arg.vht_cap); ar->fw_version_major = - (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; - ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); + (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24; + ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff); ar->fw_version_release = - (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16; - ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff); - ar->phy_capability = __le32_to_cpu(ev->phy_capability); - ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16; + ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff); + ar->phy_capability = __le32_to_cpu(arg.phy_capab); + ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains); + ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd); + + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + arg.service_map, arg.service_map_len); /* only manually set fw features when not using FW IE format */ if (ar->fw_api == 1 && ar->fw_version_build > 636) @@ -2198,13 +2557,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } - ar->ath_common.regulatory.current_rd = - __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - - wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap); - ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); - ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", - ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); + ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1; + ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1; if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -2216,93 +2570,18 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->fw_version_build); } - /* FIXME: it probably should be better to support this */ - if (__le32_to_cpu(ev->num_mem_reqs) > 0) { - ath10k_warn(ar, "target requested %d memory chunks; ignoring\n", - __le32_to_cpu(ev->num_mem_reqs)); - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->sw_version_1), - __le32_to_cpu(ev->abi_version), - __le32_to_cpu(ev->phy_capability), - __le32_to_cpu(ev->ht_cap_info), - __le32_to_cpu(ev->vht_cap_info), - __le32_to_cpu(ev->vht_supp_mcs), - __le32_to_cpu(ev->sys_cap_info), - __le32_to_cpu(ev->num_mem_reqs), - 
__le32_to_cpu(ev->num_rf_chains)); - - complete(&ar->wmi.service_ready); -} - -static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, - struct sk_buff *skb) -{ - u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; - int ret; - struct wmi_service_ready_event_10x *ev = (void *)skb->data; - DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {}; - - if (skb->len < sizeof(*ev)) { - ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n", - skb->len, sizeof(*ev)); - return; - } - - ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); - ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); - ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); - ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); - ar->fw_version_major = - (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; - ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); - ar->phy_capability = __le32_to_cpu(ev->phy_capability); - ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); - - if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", - ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); - ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; - } - - ar->ath_common.regulatory.current_rd = - __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - - wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap); - ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); - ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", - ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); - - if (strlen(ar->hw->wiphy->fw_version) == 0) { - snprintf(ar->hw->wiphy->fw_version, - sizeof(ar->hw->wiphy->fw_version), - "%u.%u", - ar->fw_version_major, - ar->fw_version_minor); - } - - num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); - - if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { + num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs); + if (num_mem_reqs > WMI_MAX_MEM_REQS) { ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", num_mem_reqs); return; } - if (!num_mem_reqs) - goto exit; - - ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", - num_mem_reqs); - for (i = 0; i < num_mem_reqs; ++i) { - req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); - num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); - unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); - num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); + req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); + num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); + unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); + num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) /* number of units to allocate is number of @@ -2316,7 +2595,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", req_id, - __le32_to_cpu(ev->mem_reqs[i].num_units), + __le32_to_cpu(arg.mem_reqs[i]->num_units), num_unit_info, unit_size, num_units); @@ -2327,23 +2606,23 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, return; } -exit: ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", - __le32_to_cpu(ev->sw_version), - __le32_to_cpu(ev->abi_version), - __le32_to_cpu(ev->phy_capability), 
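The memory-request loop that used to live only in the 10.x handler now runs for every branch; per the comment in the loop, the number of units to allocate may be dictated by a flag rather than taken from num_units directly (for example when it tracks the number of peers). A small sketch of that flag-driven decision, with the bit value and the peer-count source as stated assumptions:

#include <stdint.h>

#define NUM_UNITS_IS_NUM_PEERS  (1u << 0)   /* bit position assumed here */

/* If the request says "units == number of peers", take the count from the
 * host's configured peer total instead of the num_units field.
 */
static uint32_t resolve_num_units(uint32_t num_units, uint32_t num_unit_info,
                                  uint32_t configured_peers)
{
        if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
                return configured_peers;
        return num_units;
}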
- __le32_to_cpu(ev->ht_cap_info), - __le32_to_cpu(ev->vht_cap_info), - __le32_to_cpu(ev->vht_supp_mcs), - __le32_to_cpu(ev->sys_cap_info), - __le32_to_cpu(ev->num_mem_reqs), - __le32_to_cpu(ev->num_rf_chains)); + "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n", + __le32_to_cpu(arg.min_tx_power), + __le32_to_cpu(arg.max_tx_power), + __le32_to_cpu(arg.ht_cap), + __le32_to_cpu(arg.vht_cap), + __le32_to_cpu(arg.sw_ver0), + __le32_to_cpu(arg.sw_ver1), + __le32_to_cpu(arg.phy_capab), + __le32_to_cpu(arg.num_rf_chains), + __le32_to_cpu(arg.eeprom_rd), + __le32_to_cpu(arg.num_mem_reqs)); complete(&ar->wmi.service_ready); } -static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) +static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; @@ -2466,10 +2745,10 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_install_key_complete(ar, skb); break; case WMI_SERVICE_READY_EVENTID: - ath10k_wmi_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); @@ -2586,10 +2865,10 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_resume_req(ar, skb); break; case WMI_10X_SERVICE_READY_EVENTID: - ath10k_wmi_10x_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_10X_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; case WMI_10X_PDEV_UTF_EVENTID: /* ignore utf events */ @@ -2697,10 +2976,10 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_vdev_resume_req(ar, skb); break; case WMI_10_2_SERVICE_READY_EVENTID: - ath10k_wmi_10x_service_ready_event_rx(ar, skb); + ath10k_wmi_event_service_ready(ar, skb); break; case WMI_10_2_READY_EVENTID: - ath10k_wmi_ready_event_rx(ar, skb); + ath10k_wmi_event_ready(ar, skb); break; case WMI_10_2_RTT_KEEPALIVE_EVENTID: case WMI_10_2_GPIO_INPUT_EVENTID: @@ -2732,45 +3011,6 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) } } -/* WMI Initialization functions */ -int ath10k_wmi_attach(struct ath10k *ar) -{ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) - ar->wmi.cmd = &wmi_10_2_cmd_map; - else - ar->wmi.cmd = &wmi_10x_cmd_map; - - ar->wmi.vdev_param = &wmi_10x_vdev_param_map; - ar->wmi.pdev_param = &wmi_10x_pdev_param_map; - } else { - ar->wmi.cmd = &wmi_cmd_map; - ar->wmi.vdev_param = &wmi_vdev_param_map; - ar->wmi.pdev_param = &wmi_pdev_param_map; - } - - init_completion(&ar->wmi.service_ready); - init_completion(&ar->wmi.unified_ready); - init_waitqueue_head(&ar->wmi.tx_credits_wq); - - return 0; -} - -void ath10k_wmi_detach(struct ath10k *ar) -{ - int i; - - /* free the host memory chunks requested by firmware */ - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - dma_free_coherent(ar->dev, - ar->wmi.mem_chunks[i].len, - ar->wmi.mem_chunks[i].vaddr, - ar->wmi.mem_chunks[i].paddr); - } - - ar->wmi.num_mem_chunks = 0; -} - int ath10k_wmi_connect(struct ath10k *ar) { int status; @@ -2865,42 +3105,6 @@ int 
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, ctl2g, ctl5g); } -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *arg) -{ - struct wmi_set_channel_cmd *cmd; - struct sk_buff *skb; - u32 ch_flags = 0; - - if (arg->passive) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - if (arg->chan_radar) - ch_flags |= WMI_CHAN_FLAG_DFS; - - cmd = (struct wmi_set_channel_cmd *)skb->data; - cmd->chan.mhz = __cpu_to_le32(arg->freq); - cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq); - cmd->chan.mode = arg->mode; - cmd->chan.flags |= __cpu_to_le32(ch_flags); - cmd->chan.min_power = arg->min_power; - cmd->chan.max_power = arg->max_power; - cmd->chan.reg_power = arg->max_reg_power; - cmd->chan.reg_classid = arg->reg_class_id; - cmd->chan.antenna_max = arg->max_antenna_gain; - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi set channel mode %d freq %d\n", - arg->mode, arg->freq); - - return ath10k_wmi_cmd_send(ar, skb, - ar->wmi.cmd->pdev_set_channel_cmdid); -} - int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) { struct wmi_pdev_suspend_cmd *cmd; @@ -2951,16 +3155,37 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); } +static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar, + struct wmi_host_mem_chunks *chunks) +{ + struct host_memory_chunk *chunk; + int i; + + chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + chunk = &chunks->items[i]; + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%llx\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr); + } +} + static int ath10k_wmi_main_cmd_init(struct ath10k *ar) { struct wmi_init_cmd *cmd; struct sk_buff *buf; struct wmi_resource_config config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); - config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS); config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS); config.num_offload_reorder_bufs = @@ -3019,32 +3244,8 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3056,7 +3257,6 @@ static int 
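Instead of three copies of the chunk-copy loop, every init command now calls ath10k_wmi_put_host_mem_chunks(), which writes the chunk count followed by one (address, size, request id) record per chunk in little-endian byte order. A userspace sketch of that serialization, with htole32() standing in for __cpu_to_le32() and an illustrative record layout:

#include <endian.h>
#include <stdint.h>

struct chunk_item  { uint32_t ptr, size, req_id; };       /* wire record */
struct chunk_table { uint32_t count; struct chunk_item items[16]; };

struct host_chunk  { uint64_t paddr; uint32_t len; uint32_t req_id; };

/* Serialize the host-side chunk list; the caller is assumed to guarantee
 * that n fits the fixed items[] array.
 */
static void put_host_mem_chunks(const struct host_chunk *chunks, unsigned n,
                                struct chunk_table *out)
{
        unsigned i;

        out->count = htole32(n);
        for (i = 0; i < n; i++) {
                out->items[i].ptr    = htole32((uint32_t)chunks[i].paddr);
                out->items[i].size   = htole32(chunks[i].len);
                out->items[i].req_id = htole32(chunks[i].req_id);
        }
}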
ath10k_wmi_10x_cmd_init(struct ath10k *ar) struct sk_buff *buf; struct wmi_resource_config_10x config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); @@ -3110,32 +3310,8 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd_10x *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3147,7 +3323,6 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) struct sk_buff *buf; struct wmi_resource_config_10x config = {}; u32 len, val; - int i; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); @@ -3201,32 +3376,8 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) cmd = (struct wmi_init_cmd_10_2 *)buf->data; - if (ar->wmi.num_mem_chunks == 0) { - cmd->num_host_mem_chunks = 0; - goto out; - } - - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", - ar->wmi.num_mem_chunks); - - cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); - - for (i = 0; i < ar->wmi.num_mem_chunks; i++) { - cmd->host_mem_chunks[i].ptr = - __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); - cmd->host_mem_chunks[i].size = - __cpu_to_le32(ar->wmi.mem_chunks[i].len); - cmd->host_mem_chunks[i].req_id = - __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - - ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi chunk %d len %d requested, addr 0x%llx\n", - i, - ar->wmi.mem_chunks[i].len, - (unsigned long long)ar->wmi.mem_chunks[i].paddr); - } -out: memcpy(&cmd->resource_config.common, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); @@ -3248,52 +3399,50 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) return ret; } -static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, - const struct wmi_start_scan_arg *arg) +static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) { - int len; + if (arg->ie_len && !arg->ie) + return -EINVAL; + if (arg->n_channels && !arg->channels) + return -EINVAL; + if (arg->n_ssids && !arg->ssids) + return -EINVAL; + if (arg->n_bssids && !arg->bssids) + return -EINVAL; - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - len = sizeof(struct wmi_start_scan_cmd_10x); - else - len = sizeof(struct wmi_start_scan_cmd); + if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) + return -EINVAL; + if (arg->n_channels > ARRAY_SIZE(arg->channels)) + return -EINVAL; + if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) + return -EINVAL; + if (arg->n_bssids > 
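The argument checks that used to be interleaved with the length calculation are now a standalone ath10k_wmi_start_scan_verify(): every optional section must come with a buffer when its count is non-zero, and each count is capped by the corresponding firmware limit before anything is allocated. A minimal sketch of that pre-flight pattern (the SSID cap below is a stand-in; the IE and BSSID limits match the defines quoted later in the header hunk):

#include <stddef.h>

#define MAX_IE_LEN  256    /* WLAN_SCAN_PARAMS_MAX_IE_LEN in the patch */
#define MAX_SSID    16     /* stand-in for WLAN_SCAN_PARAMS_MAX_SSID   */
#define MAX_BSSID   4      /* WLAN_SCAN_PARAMS_MAX_BSSID               */

struct scan_arg {
        const void *ie;     size_t ie_len;
        const void *ssids;  size_t n_ssids;
        const void *bssids; size_t n_bssids;
};

/* Reject inconsistent or oversized scan arguments up front; -1 stands in
 * for -EINVAL.
 */
static int scan_verify(const struct scan_arg *arg)
{
        if (arg->ie_len && !arg->ie)
                return -1;
        if (arg->n_ssids && !arg->ssids)
                return -1;
        if (arg->n_bssids && !arg->bssids)
                return -1;
        if (arg->ie_len > MAX_IE_LEN || arg->n_ssids > MAX_SSID ||
            arg->n_bssids > MAX_BSSID)
                return -1;
        return 0;
}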
WLAN_SCAN_PARAMS_MAX_BSSID) + return -EINVAL; - if (arg->ie_len) { - if (!arg->ie) - return -EINVAL; - if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN) - return -EINVAL; + return 0; +} +static size_t +ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg) +{ + int len = 0; + + if (arg->ie_len) { len += sizeof(struct wmi_ie_data); len += roundup(arg->ie_len, 4); } if (arg->n_channels) { - if (!arg->channels) - return -EINVAL; - if (arg->n_channels > ARRAY_SIZE(arg->channels)) - return -EINVAL; - len += sizeof(struct wmi_chan_list); len += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - if (!arg->ssids) - return -EINVAL; - if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID) - return -EINVAL; - len += sizeof(struct wmi_ssid_list); len += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - if (!arg->bssids) - return -EINVAL; - if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID) - return -EINVAL; - len += sizeof(struct wmi_bssid_list); len += sizeof(struct wmi_mac_addr) * arg->n_bssids; } @@ -3301,28 +3450,12 @@ static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, return len; } -int ath10k_wmi_start_scan(struct ath10k *ar, - const struct wmi_start_scan_arg *arg) +static void +ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn, + const struct wmi_start_scan_arg *arg) { - struct wmi_start_scan_cmd *cmd; - struct sk_buff *skb; - struct wmi_ie_data *ie; - struct wmi_chan_list *channels; - struct wmi_ssid_list *ssids; - struct wmi_bssid_list *bssids; u32 scan_id; u32 scan_req_id; - int off; - int len = 0; - int i; - - len = ath10k_wmi_start_scan_calc_len(ar, arg); - if (len < 0) - return len; /* len contains error code here */ - - skb = ath10k_wmi_alloc_skb(ar, len); - if (!skb) - return -ENOMEM; scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX; scan_id |= arg->scan_id; @@ -3330,35 +3463,36 @@ int ath10k_wmi_start_scan(struct ath10k *ar, scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX; scan_req_id |= arg->scan_req_id; - cmd = (struct wmi_start_scan_cmd *)skb->data; - cmd->scan_id = __cpu_to_le32(scan_id); - cmd->scan_req_id = __cpu_to_le32(scan_req_id); - cmd->vdev_id = __cpu_to_le32(arg->vdev_id); - cmd->scan_priority = __cpu_to_le32(arg->scan_priority); - cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events); - cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); - cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); - cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time); - cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time); - cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); - cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); - cmd->idle_time = __cpu_to_le32(arg->idle_time); - cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time); - cmd->probe_delay = __cpu_to_le32(arg->probe_delay); - cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); - - /* TLV list starts after fields included in the struct */ - /* There's just one filed that differes the two start_scan - * structures - burst_duration, which we are not using btw, - no point to make the split here, just shift the buffer to fit with - given FW */ - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - off = sizeof(struct wmi_start_scan_cmd_10x); - else - off = sizeof(struct wmi_start_scan_cmd); + cmn->scan_id = __cpu_to_le32(scan_id); + cmn->scan_req_id = __cpu_to_le32(scan_req_id); + cmn->vdev_id = __cpu_to_le32(arg->vdev_id); + cmn->scan_priority = __cpu_to_le32(arg->scan_priority); + cmn->notify_scan_events = 
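With validation split out, ath10k_wmi_start_scan_tlvs_len() only sums sizes: each present section contributes its list header plus its entries, and the IE payload is padded to a 4-byte boundary with roundup(). A sketch of the same accounting (the header and per-entry sizes are stand-ins for the real wmi structures):

#include <stddef.h>
#include <stdint.h>

struct list_hdr { uint32_t tag; uint32_t count; };   /* illustrative header */

static size_t round_up4(size_t x)
{
        return (x + 3) & ~(size_t)3;     /* roundup(arg->ie_len, 4) */
}

static size_t scan_tlvs_len(size_t n_channels, size_t n_ssids,
                            size_t n_bssids, size_t ie_len)
{
        size_t len = 0;

        if (n_channels)     /* channel list: header + one __le32 per channel */
                len += sizeof(struct list_hdr) + sizeof(uint32_t) * n_channels;
        if (n_ssids)        /* ssid list: header + fixed-size ssid entries */
                len += sizeof(struct list_hdr) + 36 * n_ssids;
        if (n_bssids)       /* bssid list: header + one MAC address per entry */
                len += sizeof(struct list_hdr) + 6 * n_bssids;
        if (ie_len)         /* ie data: header + payload padded to 4 bytes */
                len += sizeof(struct list_hdr) + round_up4(ie_len);
        return len;
}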
__cpu_to_le32(arg->notify_scan_events); + cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active); + cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive); + cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time); + cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time); + cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time); + cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time); + cmn->idle_time = __cpu_to_le32(arg->idle_time); + cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time); + cmn->probe_delay = __cpu_to_le32(arg->probe_delay); + cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); +} + +static void +ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs, + const struct wmi_start_scan_arg *arg) +{ + struct wmi_ie_data *ie; + struct wmi_chan_list *channels; + struct wmi_ssid_list *ssids; + struct wmi_bssid_list *bssids; + void *ptr = tlvs->tlvs; + int i; if (arg->n_channels) { - channels = (void *)skb->data + off; + channels = ptr; channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG); channels->num_chan = __cpu_to_le32(arg->n_channels); @@ -3366,12 +3500,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar, channels->channel_list[i].freq = __cpu_to_le16(arg->channels[i]); - off += sizeof(*channels); - off += sizeof(__le32) * arg->n_channels; + ptr += sizeof(*channels); + ptr += sizeof(__le32) * arg->n_channels; } if (arg->n_ssids) { - ssids = (void *)skb->data + off; + ssids = ptr; ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG); ssids->num_ssids = __cpu_to_le32(arg->n_ssids); @@ -3383,12 +3517,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar, arg->ssids[i].len); } - off += sizeof(*ssids); - off += sizeof(struct wmi_ssid) * arg->n_ssids; + ptr += sizeof(*ssids); + ptr += sizeof(struct wmi_ssid) * arg->n_ssids; } if (arg->n_bssids) { - bssids = (void *)skb->data + off; + bssids = ptr; bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG); bssids->num_bssid = __cpu_to_le32(arg->n_bssids); @@ -3397,23 +3531,57 @@ int ath10k_wmi_start_scan(struct ath10k *ar, arg->bssids[i].bssid, ETH_ALEN); - off += sizeof(*bssids); - off += sizeof(struct wmi_mac_addr) * arg->n_bssids; + ptr += sizeof(*bssids); + ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids; } if (arg->ie_len) { - ie = (void *)skb->data + off; + ie = ptr; ie->tag = __cpu_to_le32(WMI_IE_TAG); ie->ie_len = __cpu_to_le32(arg->ie_len); memcpy(ie->ie_data, arg->ie, arg->ie_len); - off += sizeof(*ie); - off += roundup(arg->ie_len, 4); + ptr += sizeof(*ie); + ptr += roundup(arg->ie_len, 4); } +} - if (off != skb->len) { - dev_kfree_skb(skb); - return -EINVAL; +int ath10k_wmi_start_scan(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) +{ + struct sk_buff *skb; + size_t len; + int ret; + + ret = ath10k_wmi_start_scan_verify(arg); + if (ret) + return ret; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + len = sizeof(struct wmi_10x_start_scan_cmd) + + ath10k_wmi_start_scan_tlvs_len(arg); + else + len = sizeof(struct wmi_start_scan_cmd) + + ath10k_wmi_start_scan_tlvs_len(arg); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + struct wmi_10x_start_scan_cmd *cmd; + + cmd = (struct wmi_10x_start_scan_cmd *)skb->data; + ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); + } else { + struct wmi_start_scan_cmd *cmd; + + cmd = (struct wmi_start_scan_cmd *)skb->data; + cmd->burst_duration_ms = __cpu_to_le32(0); + + 
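ath10k_wmi_put_start_scan_tlvs() then walks a cursor over the tail of the command, writing the channel, SSID, BSSID and IE sections back to back and advancing by header plus payload each time, which replaces the old skb->data + off arithmetic. A generic tag-length writer in that style (the header layout and the htole32() conversion are the sketch's own, not the wmi structures):

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint32_t tag; uint32_t len; };

/* Write one section at the cursor and return the advanced cursor, the way
 * each branch in the driver bumps its ptr after filling a section.
 */
static uint8_t *put_tlv(uint8_t *ptr, uint32_t tag,
                        const void *payload, uint32_t len)
{
        struct tlv_hdr hdr = { htole32(tag), htole32(len) };

        memcpy(ptr, &hdr, sizeof(hdr));
        memcpy(ptr + sizeof(hdr), payload, len);
        return ptr + sizeof(hdr) + len;
}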
ath10k_wmi_put_start_scan_common(&cmd->common, arg); + ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); } ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); @@ -3532,7 +3700,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, struct sk_buff *skb; const char *cmdname; u32 flags = 0; - u32 ch_flags = 0; if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) @@ -3559,8 +3726,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, flags |= WMI_VDEV_START_HIDDEN_SSID; if (arg->pmf_enabled) flags |= WMI_VDEV_START_PMF_ENABLED; - if (arg->channel.chan_radar) - ch_flags |= WMI_CHAN_FLAG_DFS; cmd = (struct wmi_vdev_start_request_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); @@ -3576,18 +3741,7 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar, memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); } - cmd->chan.mhz = __cpu_to_le32(arg->channel.freq); - - cmd->chan.band_center_freq1 = - __cpu_to_le32(arg->channel.band_center_freq1); - - cmd->chan.mode = arg->channel.mode; - cmd->chan.flags |= __cpu_to_le32(ch_flags); - cmd->chan.min_power = arg->channel.min_power; - cmd->chan.max_power = arg->channel.max_power; - cmd->chan.reg_power = arg->channel.max_reg_power; - cmd->chan.reg_classid = arg->channel.reg_class_id; - cmd->chan.antenna_max = arg->channel.max_antenna_gain; + ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n", @@ -3968,35 +4122,10 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) { - u32 flags = 0; - ch = &arg->channels[i]; ci = &cmd->chan_info[i]; - if (ch->passive) - flags |= WMI_CHAN_FLAG_PASSIVE; - if (ch->allow_ibss) - flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED; - if (ch->allow_ht) - flags |= WMI_CHAN_FLAG_ALLOW_HT; - if (ch->allow_vht) - flags |= WMI_CHAN_FLAG_ALLOW_VHT; - if (ch->ht40plus) - flags |= WMI_CHAN_FLAG_HT40_PLUS; - if (ch->chan_radar) - flags |= WMI_CHAN_FLAG_DFS; - - ci->mhz = __cpu_to_le32(ch->freq); - ci->band_center_freq1 = __cpu_to_le32(ch->freq); - ci->band_center_freq2 = 0; - ci->min_power = ch->min_power; - ci->max_power = ch->max_power; - ci->reg_power = ch->max_reg_power; - ci->antenna_max = ch->max_antenna_gain; - - /* mode & flags share storage */ - ci->mode = ch->mode; - ci->flags |= __cpu_to_le32(flags); + ath10k_wmi_put_wmi_channel(ci, ch); } return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); @@ -4108,9 +4237,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) - ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); - else ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); + else + ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); } else { ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); } @@ -4267,3 +4396,73 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid); } + +int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap) +{ + struct wmi_pdev_pktlog_enable_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + ev_bitmap &= ATH10K_PKTLOG_ANY; + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi enable pktlog filter:%x\n", ev_bitmap); + + cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data; + 
cmd->ev_bitmap = __cpu_to_le32(ev_bitmap); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_pktlog_enable_cmdid); +} + +int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar) +{ + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, 0); + if (!skb) + return -ENOMEM; + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n"); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_pktlog_disable_cmdid); +} + +int ath10k_wmi_attach(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ar->wmi.cmd = &wmi_10_2_cmd_map; + else + ar->wmi.cmd = &wmi_10x_cmd_map; + + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; + ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + } else { + ar->wmi.cmd = &wmi_cmd_map; + ar->wmi.vdev_param = &wmi_vdev_param_map; + ar->wmi.pdev_param = &wmi_pdev_param_map; + } + + init_completion(&ar->wmi.service_ready); + init_completion(&ar->wmi.unified_ready); + + return 0; +} + +void ath10k_wmi_detach(struct ath10k *ar) +{ + int i; + + /* free the host memory chunks requested by firmware */ + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + dma_free_coherent(ar->dev, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].vaddr, + ar->wmi.mem_chunks[i].paddr); + } + + ar->wmi.num_mem_chunks = 0; +} diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 86f5ebccfe79..21391929d318 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -222,128 +222,131 @@ static inline char *wmi_service_name(int service_id) #undef SVCSTR } -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ - (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ +#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) < (len) && \ + __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ BIT((svc_id)%(sizeof(u32)))) -#define SVCMAP(x, y) \ +#define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x))) \ + if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ __set_bit(y, out); \ } while (0) -static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out) +static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, + size_t len) { SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD, - WMI_SERVICE_BEACON_OFFLOAD); + WMI_SERVICE_BEACON_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD, - WMI_SERVICE_SCAN_OFFLOAD); + WMI_SERVICE_SCAN_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD, - WMI_SERVICE_ROAM_OFFLOAD); + WMI_SERVICE_ROAM_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD, - WMI_SERVICE_BCN_MISS_OFFLOAD); + WMI_SERVICE_BCN_MISS_OFFLOAD, len); SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE, - WMI_SERVICE_STA_PWRSAVE); + WMI_SERVICE_STA_PWRSAVE, len); SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, - WMI_SERVICE_STA_ADVANCED_PWRSAVE); + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); SVCMAP(WMI_10X_SERVICE_AP_UAPSD, - WMI_SERVICE_AP_UAPSD); + WMI_SERVICE_AP_UAPSD, len); SVCMAP(WMI_10X_SERVICE_AP_DFS, - WMI_SERVICE_AP_DFS); + WMI_SERVICE_AP_DFS, len); SVCMAP(WMI_10X_SERVICE_11AC, - WMI_SERVICE_11AC); + WMI_SERVICE_11AC, len); SVCMAP(WMI_10X_SERVICE_BLOCKACK, - WMI_SERVICE_BLOCKACK); + WMI_SERVICE_BLOCKACK, len); SVCMAP(WMI_10X_SERVICE_PHYERR, - WMI_SERVICE_PHYERR); + WMI_SERVICE_PHYERR, len); SVCMAP(WMI_10X_SERVICE_BCN_FILTER, - WMI_SERVICE_BCN_FILTER); + WMI_SERVICE_BCN_FILTER, len); SVCMAP(WMI_10X_SERVICE_RTT, - WMI_SERVICE_RTT); + WMI_SERVICE_RTT, len); SVCMAP(WMI_10X_SERVICE_RATECTRL, - 
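On the header side, WMI_SERVICE_IS_ENABLED() now takes the bitmap length and refuses to test a service id beyond what the firmware actually sent, and every SVCMAP() entry forwards that length. A function form of the bounded lookup that keeps the macro's arithmetic verbatim, with le32toh() standing in for __le32_to_cpu():

#include <endian.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* svc_id is checked against the bitmap length before the word is indexed,
 * so a shorter 10.x bitmap can never be read out of bounds.
 */
static bool service_is_enabled(const uint32_t *bitmap_le, size_t len,
                               unsigned svc_id)
{
        if (svc_id >= len)       /* mirrors the "(svc_id) < (len)" guard */
                return false;
        return le32toh(bitmap_le[svc_id / sizeof(uint32_t)]) &
               (1u << (svc_id % sizeof(uint32_t)));
}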
WMI_SERVICE_RATECTRL); + WMI_SERVICE_RATECTRL, len); SVCMAP(WMI_10X_SERVICE_WOW, - WMI_SERVICE_WOW); + WMI_SERVICE_WOW, len); SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE, - WMI_SERVICE_RATECTRL_CACHE); + WMI_SERVICE_RATECTRL_CACHE, len); SVCMAP(WMI_10X_SERVICE_IRAM_TIDS, - WMI_SERVICE_IRAM_TIDS); + WMI_SERVICE_IRAM_TIDS, len); SVCMAP(WMI_10X_SERVICE_BURST, - WMI_SERVICE_BURST); + WMI_SERVICE_BURST, len); SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, - WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG, - WMI_SERVICE_FORCE_FW_HANG); + WMI_SERVICE_FORCE_FW_HANG, len); SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, - WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); } -static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out) +static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, + size_t len) { SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD, - WMI_SERVICE_BEACON_OFFLOAD); + WMI_SERVICE_BEACON_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD, - WMI_SERVICE_SCAN_OFFLOAD); + WMI_SERVICE_SCAN_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD, - WMI_SERVICE_ROAM_OFFLOAD); + WMI_SERVICE_ROAM_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, - WMI_SERVICE_BCN_MISS_OFFLOAD); + WMI_SERVICE_BCN_MISS_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE, - WMI_SERVICE_STA_PWRSAVE); + WMI_SERVICE_STA_PWRSAVE, len); SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, - WMI_SERVICE_STA_ADVANCED_PWRSAVE); + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD, - WMI_SERVICE_AP_UAPSD); + WMI_SERVICE_AP_UAPSD, len); SVCMAP(WMI_MAIN_SERVICE_AP_DFS, - WMI_SERVICE_AP_DFS); + WMI_SERVICE_AP_DFS, len); SVCMAP(WMI_MAIN_SERVICE_11AC, - WMI_SERVICE_11AC); + WMI_SERVICE_11AC, len); SVCMAP(WMI_MAIN_SERVICE_BLOCKACK, - WMI_SERVICE_BLOCKACK); + WMI_SERVICE_BLOCKACK, len); SVCMAP(WMI_MAIN_SERVICE_PHYERR, - WMI_SERVICE_PHYERR); + WMI_SERVICE_PHYERR, len); SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER, - WMI_SERVICE_BCN_FILTER); + WMI_SERVICE_BCN_FILTER, len); SVCMAP(WMI_MAIN_SERVICE_RTT, - WMI_SERVICE_RTT); + WMI_SERVICE_RTT, len); SVCMAP(WMI_MAIN_SERVICE_RATECTRL, - WMI_SERVICE_RATECTRL); + WMI_SERVICE_RATECTRL, len); SVCMAP(WMI_MAIN_SERVICE_WOW, - WMI_SERVICE_WOW); + WMI_SERVICE_WOW, len); SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE, - WMI_SERVICE_RATECTRL_CACHE); + WMI_SERVICE_RATECTRL_CACHE, len); SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS, - WMI_SERVICE_IRAM_TIDS); + WMI_SERVICE_IRAM_TIDS, len); SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD, - WMI_SERVICE_ARPNS_OFFLOAD); + WMI_SERVICE_ARPNS_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_NLO, - WMI_SERVICE_NLO); + WMI_SERVICE_NLO, len); SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD, - WMI_SERVICE_GTK_OFFLOAD); + WMI_SERVICE_GTK_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH, - WMI_SERVICE_SCAN_SCH); + WMI_SERVICE_SCAN_SCH, len); SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD, - WMI_SERVICE_CSA_OFFLOAD); + WMI_SERVICE_CSA_OFFLOAD, len); SVCMAP(WMI_MAIN_SERVICE_CHATTER, - WMI_SERVICE_CHATTER); + WMI_SERVICE_CHATTER, len); SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID, - WMI_SERVICE_COEX_FREQAVOID); + WMI_SERVICE_COEX_FREQAVOID, len); SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE, - WMI_SERVICE_PACKET_POWER_SAVE); + WMI_SERVICE_PACKET_POWER_SAVE, len); SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG, - WMI_SERVICE_FORCE_FW_HANG); + WMI_SERVICE_FORCE_FW_HANG, len); SVCMAP(WMI_MAIN_SERVICE_GPIO, - WMI_SERVICE_GPIO); + WMI_SERVICE_GPIO, len); SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, - 
WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len); SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, - WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, - WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE, - WMI_SERVICE_STA_KEEP_ALIVE); + WMI_SERVICE_STA_KEEP_ALIVE, len); SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP, - WMI_SERVICE_TX_ENCAP); + WMI_SERVICE_TX_ENCAP, len); } #undef SVCMAP @@ -1428,11 +1431,11 @@ struct wmi_service_ready_event { * where FW can access this memory directly (or) by DMA. */ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[1]; + struct wlan_host_mem_req mem_reqs[0]; } __packed; /* This is the definition from 10.X firmware branch */ -struct wmi_service_ready_event_10x { +struct wmi_10x_service_ready_event { __le32 sw_version; __le32 abi_version; @@ -1467,7 +1470,7 @@ struct wmi_service_ready_event_10x { */ __le32 num_mem_reqs; - struct wlan_host_mem_req mem_reqs[1]; + struct wlan_host_mem_req mem_reqs[0]; } __packed; #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) @@ -1883,38 +1886,26 @@ struct host_memory_chunk { __le32 size; } __packed; +struct wmi_host_mem_chunks { + __le32 count; + /* some fw revisions require at least 1 chunk regardless of count */ + struct host_memory_chunk items[1]; +} __packed; + struct wmi_init_cmd { struct wmi_resource_config resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; /* _10x stucture is from 10.X FW API */ struct wmi_init_cmd_10x { struct wmi_resource_config_10x resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; struct wmi_init_cmd_10_2 { struct wmi_resource_config_10_2 resource_config; - __le32 num_host_mem_chunks; - - /* - * variable number of host memory chunks. - * This should be the last element in the structure - */ - struct host_memory_chunk host_mem_chunks[1]; + struct wmi_host_mem_chunks mem_chunks; } __packed; struct wmi_chan_list_entry { @@ -1964,6 +1955,11 @@ struct wmi_ssid_list { #define WLAN_SCAN_PARAMS_MAX_BSSID 4 #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +/* Values lower than this may be refused by some firmware revisions with a scan + * completion with a timedout reason. + */ +#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40 + /* Scan priority numbers must be sequential, starting with 0 */ enum wmi_scan_priority { WMI_SCAN_PRIORITY_VERY_LOW = 0, @@ -1974,7 +1970,7 @@ enum wmi_scan_priority { WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */ }; -struct wmi_start_scan_cmd { +struct wmi_start_scan_common { /* Scan ID */ __le32 scan_id; /* Scan requestor ID */ @@ -2032,95 +2028,25 @@ struct wmi_start_scan_cmd { __le32 probe_delay; /* Scan control flags */ __le32 scan_ctrl_flags; - - /* Burst duration time in msecs */ - __le32 burst_duration; - /* - * TLV (tag length value ) paramerters follow the scan_cmd structure. - * TLV can contain channel list, bssid list, ssid list and - * ie. 
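Both service-ready structures also drop the mem_reqs[1] trailing member in favour of a zero-length array, so sizeof(*ev) now covers only the fixed header and the minimum-length check in the pull helpers stays honest (wmi_host_mem_chunks, by contrast, keeps one item because some firmware revisions require at least one chunk regardless of count). A portable C99 version of the trailing-array idiom using a flexible array member, where the patch uses the GCC [0] form:

#include <stdint.h>
#include <stdlib.h>

struct mem_req { uint32_t req_id, num_units, unit_size, num_unit_info; };

struct svc_ready_ev {
        uint32_t num_mem_reqs;
        struct mem_req mem_reqs[];       /* [0] in the kernel structs */
};

/* Allocate header plus n trailing records in one block; sizeof(*ev) is the
 * header alone, which is exactly what a "skb->len < sizeof(*ev)" style
 * check relies on.
 */
static struct svc_ready_ev *build_event(uint32_t n)
{
        struct svc_ready_ev *ev;

        ev = calloc(1, sizeof(*ev) + n * sizeof(struct mem_req));
        if (ev)
                ev->num_mem_reqs = n;
        return ev;
}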
the TLV tags are defined above; - */ } __packed; -/* This is the definition from 10.X firmware branch */ -struct wmi_start_scan_cmd_10x { - /* Scan ID */ - __le32 scan_id; - - /* Scan requestor ID */ - __le32 scan_req_id; - - /* VDEV id(interface) that is requesting scan */ - __le32 vdev_id; - - /* Scan Priority, input to scan scheduler */ - __le32 scan_priority; - - /* Scan events subscription */ - __le32 notify_scan_events; - - /* dwell time in msec on active channels */ - __le32 dwell_time_active; - - /* dwell time in msec on passive channels */ - __le32 dwell_time_passive; - - /* - * min time in msec on the BSS channel,only valid if atleast one - * VDEV is active - */ - __le32 min_rest_time; - - /* - * max rest time in msec on the BSS channel,only valid if at least - * one VDEV is active - */ - /* - * the scanner will rest on the bss channel at least min_rest_time - * after min_rest_time the scanner will start checking for tx/rx - * activity on all VDEVs. if there is no activity the scanner will - * switch to off channel. if there is activity the scanner will let - * the radio on the bss channel until max_rest_time expires.at - * max_rest_time scanner will switch to off channel irrespective of - * activity. activity is determined by the idle_time parameter. - */ - __le32 max_rest_time; - - /* - * time before sending next set of probe requests. - * The scanner keeps repeating probe requests transmission with - * period specified by repeat_probe_time. - * The number of probe requests specified depends on the ssid_list - * and bssid_list - */ - __le32 repeat_probe_time; - - /* time in msec between 2 consequetive probe requests with in a set. */ - __le32 probe_spacing_time; - - /* - * data inactivity time in msec on bss channel that will be used by - * scanner for measuring the inactivity. - */ - __le32 idle_time; - - /* maximum time in msec allowed for scan */ - __le32 max_scan_time; - - /* - * delay in msec before sending first probe request after switching - * to a channel +struct wmi_start_scan_tlvs { + /* TLV parameters. These includes channel list, ssid list, bssid list, + * extra ies. */ - __le32 probe_delay; + u8 tlvs[0]; +} __packed; - /* Scan control flags */ - __le32 scan_ctrl_flags; +struct wmi_start_scan_cmd { + struct wmi_start_scan_common common; + __le32 burst_duration_ms; + struct wmi_start_scan_tlvs tlvs; +} __packed; - /* - * TLV (tag length value ) paramerters follow the scan_cmd structure. - * TLV can contain channel list, bssid list, ssid list and - * ie. 
the TLV tags are defined above; - */ +/* This is the definition from 10.X firmware branch */ +struct wmi_10x_start_scan_cmd { + struct wmi_start_scan_common common; + struct wmi_start_scan_tlvs tlvs; } __packed; struct wmi_ssid_arg { @@ -2306,94 +2232,25 @@ struct wmi_mgmt_rx_event_v2 { #define PHY_ERROR_FALSE_RADAR_EXT 0x24 #define PHY_ERROR_RADAR 0x05 -struct wmi_single_phyerr_rx_hdr { - /* TSF timestamp */ +struct wmi_phyerr { __le32 tsf_timestamp; - - /* - * Current freq1, freq2 - * - * [7:0]: freq1[lo] - * [15:8] : freq1[hi] - * [23:16]: freq2[lo] - * [31:24]: freq2[hi] - */ __le16 freq1; __le16 freq2; - - /* - * Combined RSSI over all chains and channel width for this PHY error - * - * [7:0]: RSSI combined - * [15:8]: Channel width (MHz) - * [23:16]: PHY error code - * [24:16]: reserved (future use) - */ u8 rssi_combined; u8 chan_width_mhz; u8 phy_err_code; u8 rsvd0; - - /* - * RSSI on chain 0 through 3 - * - * This is formatted the same as the PPDU_START RX descriptor - * field: - * - * [7:0]: pri20 - * [15:8]: sec20 - * [23:16]: sec40 - * [31:24]: sec80 - */ - - __le32 rssi_chain0; - __le32 rssi_chain1; - __le32 rssi_chain2; - __le32 rssi_chain3; - - /* - * Last calibrated NF value for chain 0 through 3 - * - * nf_list_1: - * - * + [15:0] - chain 0 - * + [31:16] - chain 1 - * - * nf_list_2: - * - * + [15:0] - chain 2 - * + [31:16] - chain 3 - */ - __le32 nf_list_1; - __le32 nf_list_2; - - /* Length of the frame */ + __le32 rssi_chains[4]; + __le16 nf_chains[4]; __le32 buf_len; + u8 buf[0]; } __packed; -struct wmi_single_phyerr_rx_event { - /* Phy error event header */ - struct wmi_single_phyerr_rx_hdr hdr; - /* frame buffer */ - u8 bufp[0]; -} __packed; - -struct wmi_comb_phyerr_rx_hdr { - /* Phy error phy error count */ - __le32 num_phyerr_events; +struct wmi_phyerr_event { + __le32 num_phyerrs; __le32 tsf_l32; __le32 tsf_u32; -} __packed; - -struct wmi_comb_phyerr_rx_event { - /* Phy error phy error count */ - struct wmi_comb_phyerr_rx_hdr hdr; - /* - * frame buffer - contains multiple payloads in the order: - * header - payload, header - payload... 
- * (The header is of type: wmi_single_phyerr_rx_hdr) - */ - u8 bufp[0]; + struct wmi_phyerr phyerrs[0]; } __packed; #define PHYERR_TLV_SIG 0xBB @@ -2908,11 +2765,6 @@ enum wmi_tp_scale { WMI_TP_SCALE_SIZE = 5, /* max num of enum */ }; -struct wmi_set_channel_cmd { - /* channel (only frequency and mode info are used) */ - struct wmi_channel chan; -} __packed; - struct wmi_pdev_chanlist_update_event { /* number of channels */ __le32 num_chan; @@ -2943,6 +2795,10 @@ struct wmi_pdev_set_channel_cmd { struct wmi_channel chan; } __packed; +struct wmi_pdev_pktlog_enable_cmd { + __le32 ev_bitmap; +} __packed; + /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */ #define WMI_DSCP_MAP_MAX (64) struct wmi_pdev_set_dscp_tid_map_cmd { @@ -3177,7 +3033,7 @@ struct wmi_stats_event { * PDEV statistics * TODO: add all PDEV stats here */ -struct wmi_pdev_stats_old { +struct wmi_pdev_stats { __le32 chan_nf; /* Channel noise floor */ __le32 tx_frame_count; /* TX frame count */ __le32 rx_frame_count; /* RX frame count */ @@ -3188,15 +3044,8 @@ struct wmi_pdev_stats_old { struct wal_dbg_stats wal; /* WAL dbg stats */ } __packed; -struct wmi_pdev_stats_10x { - __le32 chan_nf; /* Channel noise floor */ - __le32 tx_frame_count; /* TX frame count */ - __le32 rx_frame_count; /* RX frame count */ - __le32 rx_clear_count; /* rx clear count */ - __le32 cycle_count; /* cycle count */ - __le32 phy_err_count; /* Phy error count */ - __le32 chan_tx_pwr; /* channel tx power */ - struct wal_dbg_stats wal; /* WAL dbg stats */ +struct wmi_10x_pdev_stats { + struct wmi_pdev_stats old; __le32 ack_rx_bad; __le32 rts_bad; __le32 rts_good; @@ -3217,16 +3066,14 @@ struct wmi_vdev_stats { * peer statistics. * TODO: add more stats */ -struct wmi_peer_stats_old { +struct wmi_peer_stats { struct wmi_mac_addr peer_macaddr; __le32 peer_rssi; __le32 peer_tx_rate; } __packed; -struct wmi_peer_stats_10x { - struct wmi_mac_addr peer_macaddr; - __le32 peer_rssi; - __le32 peer_tx_rate; +struct wmi_10x_peer_stats { + struct wmi_peer_stats old; __le32 peer_rx_rate; } __packed; @@ -4708,7 +4555,6 @@ struct wmi_dbglog_cfg_cmd { __le32 config_valid; } __packed; -#define ATH10K_RTS_MAX 2347 #define ATH10K_FRAGMT_THRESHOLD_MIN 540 #define ATH10K_FRAGMT_THRESHOLD_MAX 2346 @@ -4719,8 +4565,27 @@ struct wmi_dbglog_cfg_cmd { /* By default disable power save for IBSS */ #define ATH10K_DEFAULT_ATIM 0 +#define WMI_MAX_MEM_REQS 16 + +struct wmi_svc_rdy_ev_arg { + __le32 min_tx_power; + __le32 max_tx_power; + __le32 ht_cap; + __le32 vht_cap; + __le32 sw_ver0; + __le32 sw_ver1; + __le32 phy_capab; + __le32 num_rf_chains; + __le32 eeprom_rd; + __le32 num_mem_reqs; + const __le32 *service_map; + size_t service_map_len; + const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; +}; + struct ath10k; struct ath10k_vif; +struct ath10k_fw_stats; int ath10k_wmi_attach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar); @@ -4732,8 +4597,6 @@ int ath10k_wmi_connect(struct ath10k *ar); struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len); int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); -int ath10k_wmi_pdev_set_channel(struct ath10k *ar, - const struct wmi_channel_arg *); int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt); int ath10k_wmi_pdev_resume_target(struct ath10k *ar); int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, @@ -4794,5 +4657,9 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms); int 
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable); +int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, + struct ath10k_fw_stats *stats); +int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list); +int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar); #endif /* _WMI_H_ */ |