Diffstat (limited to 'drivers')
103 files changed, 5533 insertions, 2798 deletions
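Several of the ath11k hunks below (ce.c, dp.c, dp_tx.c, hal_rx.c) build a deferred ring head-pointer update on the kernel timer API. As background, here is a minimal sketch of the timer_setup()/from_timer()/mod_timer() pattern those hunks rely on; the struct and function names are illustrative only, not part of the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* illustrative container, mirroring the ath11k_hp_update_timer idea */
struct hp_timer_sketch {
	struct timer_list timer;
	u32 tx_num;		/* bumped by the TX path on every send */
	u32 timer_tx_num;	/* snapshot taken when the timer is armed */
};

static void hp_timer_sketch_fn(struct timer_list *t)
{
	/* from_timer() recovers the enclosing struct from its timer member */
	struct hp_timer_sketch *ht = from_timer(ht, t, timer);

	if (ht->timer_tx_num != ht->tx_num) {
		/* TX happened since arming: take a new snapshot and re-arm */
		ht->timer_tx_num = ht->tx_num;
		mod_timer(&ht->timer, jiffies + msecs_to_jiffies(10));
		return;
	}
	/* ring was idle for a full interval: write back the HP here */
}

static void hp_timer_sketch_init(struct hp_timer_sketch *ht)
{
	timer_setup(&ht->timer, hp_timer_sketch_fn, 0);
}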
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index 88a93c266c19..6f8fc5f587fe 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -419,12 +419,12 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config; pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config; - pc_host->mem_resource.name = "BCMA PCIcore external memory", + pc_host->mem_resource.name = "BCMA PCIcore external memory"; pc_host->mem_resource.start = BCMA_SOC_PCI_DMA; pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1; pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; - pc_host->io_resource.name = "BCMA PCIcore external I/O", + pc_host->io_resource.name = "BCMA PCIcore external I/O"; pc_host->io_resource.start = 0x100; pc_host->io_resource.end = 0x7FF; pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED; diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index b2da1f937478..9d730f8ac816 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -187,6 +187,26 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = { }; +static bool ath11k_ce_need_shadow_fix(int ce_id) +{ + /* only ce4 needs the shadow workaround */ + if (ce_id == 4) + return true; + return false; +} + +static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab) +{ + int i; + + if (!ab->hw_params.supports_shadow_regs) + return; + + for (i = 0; i < ab->hw_params.ce_count; i++) + if (ath11k_ce_need_shadow_fix(i)) + ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]); +} + static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe, struct sk_buff *skb, dma_addr_t paddr) { @@ -505,6 +525,12 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab, ce_ring->hal_ring_id = ret; + if (ab->hw_params.supports_shadow_regs && + ath11k_ce_need_shadow_fix(ce_id)) + ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id], + ATH11K_SHADOW_CTRL_TIMER_INTERVAL, + ce_ring->hal_ring_id); + return 0; } @@ -677,6 +703,9 @@ int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id, ath11k_hal_srng_access_end(ab, srng); + if (ath11k_ce_need_shadow_fix(pipe_id)) + ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]); + spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); @@ -713,11 +742,56 @@ static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe) } } +static void ath11k_ce_shadow_config(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ab->hw_params.host_ce_config[i].src_nentries) + ath11k_hal_srng_update_shadow_config(ab, + HAL_CE_SRC, i); + + if (ab->hw_params.host_ce_config[i].dest_nentries) { + ath11k_hal_srng_update_shadow_config(ab, + HAL_CE_DST, i); + + ath11k_hal_srng_update_shadow_config(ab, + HAL_CE_DST_STATUS, i); + } + } +} + +void ath11k_ce_get_shadow_config(struct ath11k_base *ab, + u32 **shadow_cfg, u32 *shadow_cfg_len) +{ + if (!ab->hw_params.supports_shadow_regs) + return; + + ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); + + /* shadow is already configured */ + if (*shadow_cfg_len) + return; + + /* shadow isn't configured yet, configure now. + * non-CE srngs are configured first, then + * all CE srngs.
+ */ + ath11k_hal_srng_shadow_config(ab); + ath11k_ce_shadow_config(ab); + + /* get the shadow configuration */ + ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); +} +EXPORT_SYMBOL(ath11k_ce_get_shadow_config); + void ath11k_ce_cleanup_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int pipe_num; + ath11k_ce_stop_shadow_timers(ab); + for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) { pipe = &ab->ce.ce_pipe[pipe_num]; ath11k_ce_rx_pipe_cleanup(pipe); @@ -767,6 +841,9 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab) int i; int ret; + ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2, + &ab->qmi.ce_cfg.shadow_reg_v2_len); + for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; @@ -828,6 +905,9 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab) for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; + if (ath11k_ce_need_shadow_fix(i)) + ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]); + if (pipe->src_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); dma_free_coherent(ab->dev, diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index cf704f18f3a1..269b599ac0b0 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -168,6 +168,7 @@ struct ath11k_ce { struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX]; /* Protects rings of all ce pipes */ spinlock_t ce_lock; + struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX]; }; extern const struct ce_attr ath11k_host_ce_config_ipq8074[]; @@ -187,4 +188,6 @@ void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id); int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int ath11k_ce_attr_attach(struct ath11k_base *ab); +void ath11k_ce_get_shadow_config(struct ath11k_base *ab, + u32 **shadow_cfg, u32 *shadow_cfg_len); #endif diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 0a85f20b6499..ebd6886a8c18 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -58,6 +58,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .htt_peer_map_v2 = true, .tcl_0_only = false, .spectral_fft_sz = 2, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), + .supports_monitor = true, + .supports_shadow_regs = false, + .idle_ps = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -88,6 +95,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .htt_peer_map_v2 = true, .tcl_0_only = false, .spectral_fft_sz = 4, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT), + .supports_monitor = true, + .supports_shadow_regs = false, + .idle_ps = false, }, { .name = "qca6390 hw2.0", @@ -118,6 +132,12 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .htt_peer_map_v2 = false, .tcl_0_only = true, .spectral_fft_sz = 0, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .supports_monitor = false, + .supports_shadow_regs = true, + .idle_ps = true, }, }; @@ -398,6 +418,7 @@ static void ath11k_core_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_qmi_firmware_stop(ab); + ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); diff --git a/drivers/net/wireless/ath/ath11k/core.h 
b/drivers/net/wireless/ath/ath11k/core.h index 02a87027c4e4..18b97420f0d8 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -37,6 +37,8 @@ extern unsigned int ath11k_frame_mode; +#define ATH11K_MON_TIMER_INTERVAL 10 + enum ath11k_supported_bw { ATH11K_BW_20 = 0, ATH11K_BW_40 = 1, @@ -727,6 +729,7 @@ struct ath11k_base { struct ath11k_dbring_cap *db_caps; u32 num_db_cap; + struct timer_list mon_reap_timer; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 5193b308a992..1b914e67d314 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -837,12 +837,8 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab) return 0; ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); - - if (IS_ERR_OR_NULL(ab->debugfs_soc)) { - if (IS_ERR(ab->debugfs_soc)) - return PTR_ERR(ab->debugfs_soc); - return -ENOMEM; - } + if (IS_ERR(ab->debugfs_soc)) + return PTR_ERR(ab->debugfs_soc); debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); @@ -855,27 +851,21 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab) void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab) { - debugfs_remove_recursive(ab->debugfs_ath11k); - ab->debugfs_ath11k = NULL; + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = NULL; } int ath11k_debugfs_soc_create(struct ath11k_base *ab) { ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL); - if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) { - if (IS_ERR(ab->debugfs_ath11k)) - return PTR_ERR(ab->debugfs_ath11k); - return -ENOMEM; - } - - return 0; + return PTR_ERR_OR_ZERO(ab->debugfs_ath11k); } void ath11k_debugfs_soc_destroy(struct ath11k_base *ab) { - debugfs_remove_recursive(ab->debugfs_soc); - ab->debugfs_soc = NULL; + debugfs_remove_recursive(ab->debugfs_ath11k); + ab->debugfs_ath11k = NULL; } void ath11k_debugfs_fw_stats_init(struct ath11k *ar) @@ -1069,13 +1059,8 @@ int ath11k_debugfs_register(struct ath11k *ar) snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); - - if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) { - if (IS_ERR(ar->debug.debugfs_pdev)) - return PTR_ERR(ar->debug.debugfs_pdev); - - return -ENOMEM; - } + if (IS_ERR(ar->debug.debugfs_pdev)) + return PTR_ERR(ar->debug.debugfs_pdev); /* Create a symlink under ieee80211/phy* */ snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev); diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 677e2d9fec11..59dd185a0cfc 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -304,11 +304,25 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, return 0; } +static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab) +{ + int i; + + if (!ab->hw_params.supports_shadow_regs) + return; + + for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]); + + ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer); +} + static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int i; + ath11k_dp_stop_shadow_timers(ab); ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring); @@ -374,6 
+388,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id]; ath11k_hal_tx_init_data_ring(ab, srng); + + ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i], + ATH11K_SHADOW_DP_TIMER_INTERVAL, + dp->tx_ring[i].tcl_data_ring.ring_id); } ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT, @@ -409,6 +427,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; ath11k_hal_reo_init_cmd_ring(ab, srng); + ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer, + ATH11K_SHADOW_CTRL_TIMER_INTERVAL, + dp->reo_cmd_ring.ring_id); + ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS, 0, 0, DP_REO_STATUS_RING_SIZE); if (ret) { @@ -812,8 +834,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0, - HAL_RX_BUF_RBM_SW3_BM, - GFP_ATOMIC); + HAL_RX_BUF_RBM_SW3_BM); } } } @@ -829,6 +850,8 @@ void ath11k_dp_pdev_free(struct ath11k_base *ab) struct ath11k *ar; int i; + del_timer_sync(&ab->mon_reap_timer); + for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; ath11k_dp_rx_pdev_free(ab, i); @@ -1065,3 +1088,78 @@ fail_link_desc_cleanup: return ret; } + +static void ath11k_dp_shadow_timer_handler(struct timer_list *t) +{ + struct ath11k_hp_update_timer *update_timer = from_timer(update_timer, + t, timer); + struct ath11k_base *ab = update_timer->ab; + struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id]; + + spin_lock_bh(&srng->lock); + + /* when the timer fires, the handler checks whether any new TX + * has happened. The handler updates the HP only when there were + * no TX operations during the timeout interval, and then stops + * the timer. The timer is started again when TX happens again.
+ */ + if (update_timer->timer_tx_num != update_timer->tx_num) { + update_timer->timer_tx_num = update_timer->tx_num; + mod_timer(&update_timer->timer, jiffies + + msecs_to_jiffies(update_timer->interval)); + } else { + update_timer->started = false; + ath11k_hal_srng_shadow_update_hp_tp(ab, srng); + } + + spin_unlock_bh(&srng->lock); +} + +void ath11k_dp_shadow_start_timer(struct ath11k_base *ab, + struct hal_srng *srng, + struct ath11k_hp_update_timer *update_timer) +{ + lockdep_assert_held(&srng->lock); + + if (!ab->hw_params.supports_shadow_regs) + return; + + update_timer->tx_num++; + + if (update_timer->started) + return; + + update_timer->started = true; + update_timer->timer_tx_num = update_timer->tx_num; + mod_timer(&update_timer->timer, jiffies + + msecs_to_jiffies(update_timer->interval)); +} + +void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab, + struct ath11k_hp_update_timer *update_timer) +{ + if (!ab->hw_params.supports_shadow_regs) + return; + + if (!update_timer->init) + return; + + del_timer_sync(&update_timer->timer); +} + +void ath11k_dp_shadow_init_timer(struct ath11k_base *ab, + struct ath11k_hp_update_timer *update_timer, + u32 interval, u32 ring_id) +{ + if (!ab->hw_params.supports_shadow_regs) + return; + + update_timer->tx_num = 0; + update_timer->timer_tx_num = 0; + update_timer->ab = ab; + update_timer->ring_id = ring_id; + update_timer->interval = interval; + update_timer->init = true; + timer_setup(&update_timer->timer, + ath11k_dp_shadow_timer_handler, 0); +} diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 558f03fbec99..ee8db812589b 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -40,6 +40,7 @@ struct dp_rx_tid { #define DP_REO_DESC_FREE_THRESHOLD 64 #define DP_REO_DESC_FREE_TIMEOUT_MS 1000 +#define DP_MON_SERVICE_BUDGET 128 struct dp_reo_cache_flush_elem { struct list_head list; @@ -205,6 +206,20 @@ struct ath11k_pdev_dp { #define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2) #define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19) +#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20 +#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10 + +struct ath11k_hp_update_timer { + struct timer_list timer; + bool started; + bool init; + u32 tx_num; + u32 timer_tx_num; + u32 ring_id; + u32 interval; + struct ath11k_base *ab; +}; + struct ath11k_dp { struct ath11k_base *ab; enum ath11k_htc_ep_id eid; @@ -234,6 +249,8 @@ struct ath11k_dp { * - reo_cmd_cache_flush_count */ spinlock_t reo_cmd_lock; + struct ath11k_hp_update_timer reo_cmd_timer; + struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX]; }; /* HTT definitions */ @@ -497,7 +514,7 @@ struct htt_ppdu_stats_cfg_cmd { } __packed; #define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) -#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9) +#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) #define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) enum htt_ppdu_stats_tag_type { @@ -1615,5 +1632,13 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab, struct dp_link_desc_bank *link_desc_banks, u32 ring_type, struct hal_srng *srng, u32 n_link_desc); +void ath11k_dp_shadow_start_timer(struct ath11k_base *ab, + struct hal_srng *srng, + struct ath11k_hp_update_timer *update_timer); +void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab, + struct ath11k_hp_update_timer *update_timer); +void ath11k_dp_shadow_init_timer(struct ath11k_base *ab, + struct ath11k_hp_update_timer *update_timer, + u32 interval, u32 ring_id); #endif diff --git 
a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 345eaa4f20f3..01625327eef7 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -262,12 +262,23 @@ static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); } +static void ath11k_dp_service_mon_ring(struct timer_list *t) +{ + struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); + int i; + + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) + ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); + + mod_timer(&ab->mon_reap_timer, jiffies + + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); +} + /* Returns number of Rx buffers replenished */ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - gfp_t gfp) + enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; @@ -314,7 +325,7 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, spin_lock_bh(&rx_ring->idr_lock); buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, - rx_ring->bufs_max * 3, gfp); + rx_ring->bufs_max * 3, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (buf_id < 0) goto fail_dma_unmap; @@ -434,7 +445,7 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, rx_ring->bufs_max = num_entries; ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, - HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL); + HAL_RX_BUF_RBM_SW3_BM); return 0; } @@ -570,9 +581,14 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) /* if rxdma1_enable is false, then it doesn't need * to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring * and rxdma_mon_desc_ring. + * init reap timer for QCA6390.
*/ - if (!ar->ab->hw_params.rxdma1_enable) + if (!ar->ab->hw_params.rxdma1_enable) { + /* init mon status buffer reap timer */ + timer_setup(&ar->ab->mon_reap_timer, + ath11k_dp_service_mon_ring, 0); return 0; + } ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring, @@ -1478,7 +1494,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, } spin_unlock_bh(&ar->data_lock); - ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL); + ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); if (!ppdu_info) return NULL; @@ -2598,7 +2614,7 @@ try_again: rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); + HAL_RX_BUF_RBM_SW3_BM); } ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list, @@ -2680,7 +2696,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, struct dp_rxdma_ring *rx_ring, - int *buf_id, gfp_t gfp) + int *buf_id) { struct sk_buff *skb; dma_addr_t paddr; @@ -2705,7 +2721,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, spin_lock_bh(&rx_ring->idr_lock); *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, - rx_ring->bufs_max, gfp); + rx_ring->bufs_max, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (*buf_id < 0) goto fail_dma_unmap; @@ -2725,8 +2741,7 @@ fail_alloc_skb: int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - gfp_t gfp) + enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; @@ -2752,7 +2767,7 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, while (num_remain > 0) { skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, - &buf_id, gfp); + &buf_id); if (!skb) break; paddr = ATH11K_SKB_RXCB(skb)->paddr; @@ -2863,7 +2878,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, } move_next: skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, - &buf_id, GFP_ATOMIC); + &buf_id); if (!skb) { ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, @@ -3676,7 +3691,7 @@ exit: rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); + HAL_RX_BUF_RBM_SW3_BM); } return tot_n_bufs_reaped; @@ -3972,7 +3987,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], - HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); + HAL_RX_BUF_RBM_SW3_BM); } rcu_read_lock(); @@ -4081,7 +4096,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) if (num_buf_freed) ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, - HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); + HAL_RX_BUF_RBM_SW3_BM); return budget - quota; } @@ -4291,8 +4306,13 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, void *src_srng_desc; int ret = 0; - dp_srng = &dp->rxdma_mon_desc_ring; - hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; + if (ar->ab->hw_params.rxdma1_enable) { + dp_srng = &dp->rxdma_mon_desc_ring; + hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; + } else { + dp_srng = &ar->ab->dp.wbm_desc_rel_ring; + hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; + } ath11k_hal_srng_access_begin(ar->ab, hal_srng); @@ -4316,16 +4336,16 @@ int
ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, static void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, dma_addr_t *paddr, u32 *sw_cookie, + u8 *rbm, void **pp_buf_addr_info) { struct hal_rx_msdu_link *msdu_link = (struct hal_rx_msdu_link *)rx_msdu_link_desc; struct ath11k_buffer_addr *buf_addr_info; - u8 rbm = 0; buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; - ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); + ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); *pp_buf_addr_info = (void *)buf_addr_info; } @@ -4436,7 +4456,7 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, } static u32 -ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, +ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, void *ring_entry, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, u32 *npackets, u32 *ppdu_id) @@ -4461,9 +4481,15 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, struct hal_reo_entrance_ring *ent_desc = (struct hal_reo_entrance_ring *)ring_entry; int buf_id; + u32 rx_link_buf_info[2]; + u8 rbm; + + if (!ar->ab->hw_params.rxdma1_enable) + rx_ring = &dp->rx_refill_buf_ring; ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, - &sw_cookie, &p_last_buf_addr_info, + &sw_cookie, + &p_last_buf_addr_info, &rbm, &msdu_cnt); if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, @@ -4489,9 +4515,14 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, return rx_bufs_used; } - rx_msdu_link_desc = - (void *)pmon->link_desc_banks[sw_cookie].vaddr + - (paddr - pmon->link_desc_banks[sw_cookie].paddr); + if (ar->ab->hw_params.rxdma1_enable) + rx_msdu_link_desc = + (void *)pmon->link_desc_banks[sw_cookie].vaddr + + (paddr - pmon->link_desc_banks[sw_cookie].paddr); + else + rx_msdu_link_desc = + (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + + (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, &num_msdus); @@ -4587,15 +4618,22 @@ next_msdu: spin_unlock_bh(&rx_ring->idr_lock); } + ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); + ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, - &sw_cookie, + &sw_cookie, &rbm, &p_buf_addr_info); - if (ath11k_dp_rx_monitor_link_desc_return(ar, - p_last_buf_addr_info, - dp->mac_id)) - ath11k_dbg(ar->ab, ATH11K_DBG_DATA, - "dp_rx_monitor_link_desc_return failed"); + if (ar->ab->hw_params.rxdma1_enable) { + if (ath11k_dp_rx_monitor_link_desc_return(ar, + p_last_buf_addr_info, + dp->mac_id)) + ath11k_dbg(ar->ab, ATH11K_DBG_DATA, + "dp_rx_monitor_link_desc_return failed"); + } else { + ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, + HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); + } p_last_buf_addr_info = p_buf_addr_info; @@ -4779,8 +4817,8 @@ mon_deliver_fail: return -EINVAL; } -static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, - struct napi_struct *napi) +static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, + u32 quota, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; @@ -4788,10 +4826,16 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, void *mon_dst_srng; u32 ppdu_id; u32 rx_bufs_used; + u32 ring_id; struct ath11k_pdev_mon_stats *rx_mon_stats; u32 npackets = 0; - mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + if (ar->ab->hw_params.rxdma1_enable) + ring_id = 
dp->rxdma_mon_dst_ring.ring_id; + else + ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; + + mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; if (!mon_dst_srng) { ath11k_warn(ar->ab, @@ -4814,7 +4858,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, head_msdu = NULL; tail_msdu = NULL; - rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry, + rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, &head_msdu, &tail_msdu, &npackets, &ppdu_id); @@ -4841,15 +4885,21 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, if (rx_bufs_used) { rx_mon_stats->dest_ppdu_done++; - ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, - &dp->rxdma_mon_buf_ring, - rx_bufs_used, - HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC); + if (ar->ab->hw_params.rxdma1_enable) + ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, + &dp->rxdma_mon_buf_ring, + rx_bufs_used, + HAL_RX_BUF_RBM_SW3_BM); + else + ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, + &dp->rx_refill_buf_ring, + rx_bufs_used, + HAL_RX_BUF_RBM_SW3_BM); } } static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, - u32 quota, + int mac_id, u32 quota, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; @@ -4873,7 +4923,7 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) { rx_mon_stats->status_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; - ath11k_dp_rx_mon_dest_process(ar, quota, napi); + ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi); pmon->mon_ppdu_status = DP_PPDU_STATUS_START; } dev_kfree_skb_any(status_skb); @@ -4888,10 +4938,10 @@ static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; int num_buffs_reaped = 0; - num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget, + num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget, &pmon->rx_status_q); if (num_buffs_reaped) - ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi); + ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi); return num_buffs_reaped; } diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h index 88bbcae14e34..fbea45f79c9b 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.h +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h @@ -74,8 +74,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - gfp_t gfp); + enum hal_rx_buf_return_buf_manager mgr); int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, const void *ptr, void *data), @@ -87,8 +86,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, - enum hal_rx_buf_return_buf_manager mgr, - gfp_t gfp); + enum hal_rx_buf_return_buf_manager mgr); int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar); int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar); int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id); diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 98209cccd639..3d962eee4d61 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ 
b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -254,6 +254,8 @@ tcl_ring_sel: ath11k_hal_srng_access_end(ab, tcl_ring); + ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]); + spin_unlock_bh(&tcl_ring->lock); ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ", @@ -536,6 +538,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) u32 msdu_id; u8 mac_id; + spin_lock_bh(&status_ring->lock); + ath11k_hal_srng_access_begin(ab, status_ring); while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) != @@ -555,6 +559,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) ath11k_hal_srng_access_end(ab, status_ring); + spin_unlock_bh(&status_ring->lock); + while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) { struct hal_wbm_release_ring *tx_status; u32 desc_id; @@ -786,9 +792,9 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >> HAL_ADDR_MSB_REG_SHIFT; - cmd->ring_msi_addr_lo = 0; - cmd->ring_msi_addr_hi = 0; - cmd->msi_data = 0; + cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff; + cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff; + cmd->msi_data = params.msi_data; cmd->intr_info = FIELD_PREP( HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH, @@ -804,6 +810,15 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, params.low_threshold); } + ath11k_dbg(ab, ATH11K_DBG_HAL, + "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n", + __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi, + cmd->msi_data); + + ath11k_dbg(ab, ATH11K_DBG_HAL, + "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n", + ring_id, ring_type, cmd->intr_info, cmd->info2); + ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free; @@ -868,24 +883,27 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) int len = sizeof(*cmd); u8 pdev_mask; int ret; + int i; - skb = ath11k_htc_alloc_skb(ab, len); - if (!skb) - return -ENOMEM; - - skb_put(skb, len); - cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data; - cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, - HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); - - pdev_mask = 1 << (ar->pdev_idx); - cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); - cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); - - ret = ath11k_htc_send(&ab->htc, dp->eid, skb); - if (ret) { - dev_kfree_skb_any(skb); - return ret; + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data; + cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, + HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); + + pdev_mask = 1 << (i + 1); + cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); + cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); + + ret = ath11k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } } return 0; @@ -1028,10 +1046,23 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) HTT_RX_MON_MO_DATA_FILTER_FLASG3; } - ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id, - HAL_RXDMA_MONITOR_BUF, - DP_RXDMA_REFILL_RING_SIZE, - &tlv_filter); + if (ab->hw_params.rxdma1_enable) { + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id, + HAL_RXDMA_MONITOR_BUF, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + }
else if (!reset) { + /* set in monitor mode only */ + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + ring_id = dp->rx_mac_buf_ring[i].ring_id; + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, + dp->mac_id + i, + HAL_RXDMA_BUF, + 1024, + &tlv_filter); + } + } + if (ret) return ret; @@ -1050,5 +1081,9 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) &tlv_filter); } + if (!ar->ab->hw_params.rxdma1_enable) + mod_timer(&ar->ab->mon_reap_timer, jiffies + + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); + return ret; }
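The hal.c and hal.h changes that follow route ring HP/TP accesses through a block of shadow registers near the start of BAR0. Using the constants from the hal.h hunk further down (HAL_SHADOW_BASE_ADDR 0x8fc, one 4-byte slot per register, TP sitting 4 bytes after HP), the target-register computation reduces to the sketch below; the helper name is illustrative, not driver code:

#define HAL_SHADOW_BASE_ADDR		0x000008fc
#define HAL_SHADOW_REG(x)		(HAL_SHADOW_BASE_ADDR + (4 * (x)))
#define HAL_OFFSET_FROM_HP_TO_TP	4

/* illustrative: the full register a shadow slot is programmed to mirror */
static u32 shadow_target_reg_sketch(u32 hp_reg_start, u32 hp_reg_size,
				    int ring_num, bool dst_ring)
{
	u32 target = hp_reg_start + hp_reg_size * ring_num;

	/* destination rings shadow the TP, which is 4 bytes past the HP */
	if (dst_ring)
		target += HAL_OFFSET_FROM_HP_TO_TP;

	return target;
}

A ring update then only writes BAR0 + HAL_SHADOW_REG(idx), an address in the always-accessible first 4K of the BAR, so QCA6390 can service it without waking the full register space.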
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 5cd948f55b95..9904c0eb7587 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -330,7 +330,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab), - (u32)srng->msi_addr); + srng->msi_addr); val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> @@ -344,7 +344,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, srng->msi_data); } - ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); + ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> @@ -409,7 +409,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab), - (u32)srng->msi_addr); + srng->msi_addr); val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> @@ -424,7 +424,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, srng->msi_data); } - ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); + ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> @@ -560,6 +560,8 @@ void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, params->intr_batch_cntr_thres_entries = srng->intr_batch_cntr_thres_entries; params->low_threshold = srng->u.src_ring.low_threshold; + params->msi_addr = srng->msi_addr; + params->msi_data = srng->msi_data; params->flags = srng->flags; } @@ -1018,8 +1020,16 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { - srng->u.src_ring.hp_addr = + if (!ab->hw_params.supports_shadow_regs) + srng->u.src_ring.hp_addr = (u32 *)((unsigned long)ab->mem + reg_base); + else + ath11k_dbg(ab, ATH11K_DBG_HAL, + "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n", + type, ring_num, + reg_base, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem); } } else { /* During initialization loop count in all the descriptors @@ -1043,9 +1053,18 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { - srng->u.dst_ring.tp_addr = + if (!ab->hw_params.supports_shadow_regs) + srng->u.dst_ring.tp_addr = (u32 *)((unsigned long)ab->mem + reg_base + (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))); + else + ath11k_dbg(ab, ATH11K_DBG_HAL, + "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n", + type, ring_num, + reg_base + (HAL_REO1_RING_TP(ab) - + HAL_REO1_RING_HP(ab)), + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem); } } @@ -1062,6 +1081,112 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, return ring_id; } +static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab, + int shadow_cfg_idx, + enum hal_ring_type ring_type, + int ring_num) +{ + struct hal_srng *srng; + struct ath11k_hal *hal = &ab->hal; + int ring_id; + struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; + + ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0); + if (ring_id < 0) + return; + + srng = &hal->srng_list[ring_id]; + + if (srng_config->ring_dir == HAL_SRNG_DIR_DST) + srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + (unsigned long)ab->mem); + else + srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + (unsigned long)ab->mem); +} + +int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, + enum hal_ring_type ring_type, + int ring_num) +{ + struct ath11k_hal *hal = &ab->hal; + struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; + int shadow_cfg_idx = hal->num_shadow_reg_configured; + u32 target_reg; + + if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS) + return -EINVAL; + + hal->num_shadow_reg_configured++; + + target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START]; + target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] * + ring_num; + + /* For destination ring, shadow the TP */ + if (srng_config->ring_dir == HAL_SRNG_DIR_DST) + target_reg += HAL_OFFSET_FROM_HP_TO_TP; + + hal->shadow_reg_addr[shadow_cfg_idx] = target_reg; + + /* update hp/tp addr to hal structure */ + ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type, + ring_num); + + ath11k_dbg(ab, ATH11K_DBG_HAL, + "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d", + target_reg, + HAL_SHADOW_REG(shadow_cfg_idx), + shadow_cfg_idx, + ring_type, ring_num); + + return 0; +} + +void ath11k_hal_srng_shadow_config(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + int ring_type, ring_num; + + /* update all the non-CE srngs. */ + for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) { + struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; + + if (ring_type == HAL_CE_SRC || + ring_type == HAL_CE_DST || + ring_type == HAL_CE_DST_STATUS) + continue; + + if (srng_config->lmac_ring) + continue; + + for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++) + ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num); + } +} + +void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, + u32 **cfg, u32 *len) +{ + struct ath11k_hal *hal = &ab->hal; + + *len = hal->num_shadow_reg_configured; + *cfg = hal->shadow_reg_addr; +} + +void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab, + struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + /* check whether the ring is empty. Update the shadow + * HP only when the ring isn't empty.
+ */ + if (srng->ring_dir == HAL_SRNG_DIR_SRC && + *srng->u.src_ring.tp_addr != srng->u.src_ring.hp) + ath11k_hal_srng_access_end(ab, srng); +} + static int ath11k_hal_srng_create_config(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 85192d170b6b..1f1b29cd0aa3 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -31,8 +31,12 @@ struct ath11k_base; #define HAL_DSCP_TID_TBL_SIZE 24 /* calculate the register address from bar0 of shadow register x */ -#define SHADOW_BASE_ADDRESS 0x00003024 -#define SHADOW_NUM_REGISTERS 36 +#define HAL_SHADOW_BASE_ADDR 0x000008fc +#define HAL_SHADOW_NUM_REGS 36 +#define HAL_HP_OFFSET_IN_REG_START 1 +#define HAL_OFFSET_FROM_HP_TO_TP 4 + +#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) /* WCSS Relative address */ #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 @@ -882,7 +886,7 @@ struct ath11k_hal { u8 current_blk_index; /* shadow register configuration */ - u32 shadow_reg_addr[SHADOW_NUM_REGISTERS]; + u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; }; @@ -935,5 +939,12 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, int ath11k_hal_srng_init(struct ath11k_base *ath11k); void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab); - +void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, + u32 **cfg, u32 *len); +int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, + enum hal_ring_type ring_type, + int ring_num); +void ath11k_hal_srng_shadow_config(struct ath11k_base *ab); +void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab, + struct hal_srng *srng); #endif diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index 4bbad2e341ee..fac2396edf32 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -256,6 +256,8 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng, break; } + ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer); + out: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); @@ -1195,7 +1197,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab, void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, void **pp_buf_addr, - u32 *msdu_cnt) + u8 *rbm, u32 *msdu_cnt) { struct hal_reo_entrance_ring *reo_ent_ring = (struct hal_reo_entrance_ring *)rx_desc; @@ -1217,6 +1219,8 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buf_addr_info->info1); + *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, + buf_addr_info->info1); *pp_buf_addr = (void *)buf_addr_info; } diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h index c436191ae1e8..d464a270c049 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.h +++ b/drivers/net/wireless/ath/ath11k/hal_rx.h @@ -321,7 +321,7 @@ void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc, dma_addr_t *paddr, u32 *desc_bank); void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, u32 *sw_cookie, - void **pp_buf_addr_info, + void **pp_buf_addr_info, u8 *rbm, u32 *msdu_cnt); enum hal_rx_mon_status ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab, diff --git a/drivers/net/wireless/ath/ath11k/htc.c 
b/drivers/net/wireless/ath/ath11k/htc.c index 4de2350dfbf3..6b57dc273e0b 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -515,6 +515,12 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc) return -ECOMM; } + /* For QCA6390, wmi endpoint uses 1 credit to avoid + * back-to-back write. + */ + if (ab->hw_params.supports_shadow_regs) + htc->total_transmit_credits = 1; + ath11k_htc_setup_target_buffer_assignments(htc); return 0; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 975d44e9c083..1dda4257e6d7 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -156,6 +156,11 @@ struct ath11k_hw_params { bool htt_peer_map_v2; bool tcl_0_only; u8 spectral_fft_sz; + + u16 interface_modes; + bool supports_monitor; + bool supports_shadow_regs; + bool idle_ps; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index e0cac9b61af8..3f63a7bd6b59 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -758,21 +758,12 @@ static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id) static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) { - struct ath11k *ar = hw->priv; - int ret = 0; - /* mac80211 requires this op to be present and that's why * there's an empty function, this can be extended when * required. */ - mutex_lock(&ar->conf_mutex); - - /* TODO: Handle configuration changes as appropriate */ - - mutex_unlock(&ar->conf_mutex); - - return ret; + return 0; } static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) @@ -2994,7 +2985,8 @@ static int ath11k_mac_station_add(struct ath11k *ar, goto free_tx_stats; } - if (ab->hw_params.vdev_start_delay) { + if (ab->hw_params.vdev_start_delay && + arvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath11k_start_vdev_delay(ar->hw, vif); if (ret) { ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); @@ -4216,6 +4208,15 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], &ab->pdevs[ar->pdev_idx]); + /* allow device to enter IMPS */ + if (ab->hw_params.idle_ps) { + ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG, + 1, pdev->pdev_id); + if (ret) { + ath11k_err(ab, "failed to enable idle ps: %d\n", ret); + goto err; + } + } return 0; err: @@ -4351,7 +4352,7 @@ static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar, } static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; @@ -4702,6 +4703,10 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "fail to set monitor filter: %d\n", ret); } + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n", + changed_flags, *total_flags, reset_flag); + mutex_unlock(&ar->conf_mutex); } @@ -5207,6 +5212,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = (void *)vif->drv_priv; int ret; + struct peer_create_params param; mutex_lock(&ar->conf_mutex); @@ -5215,7 +5221,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, ctx, arvif->vdev_id); /* for QCA6390 bss peer must be created before vdev_start */ - if (ab->hw_params.vdev_start_delay) { + if (ab->hw_params.vdev_start_delay && + arvif->vdev_type != 
WMI_VDEV_TYPE_AP && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); mutex_unlock(&ar->conf_mutex); return 0; @@ -5226,6 +5234,13 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, return -EBUSY; } + if (ab->hw_params.vdev_start_delay) { + param.vdev_id = arvif->vdev_id; + param.peer_type = WMI_PEER_TYPE_DEFAULT; + param.peer_addr = ar->mac_addr; + ret = ath11k_peer_create(ar, arvif, NULL, &param); + } + ret = ath11k_mac_vdev_start(arvif, &ctx->def); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", @@ -5271,6 +5286,11 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, WARN_ON(!arvif->is_started); + if (ab->hw_params.vdev_start_delay && + arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && + ath11k_peer_find_by_addr(ab, ar->mac_addr)) + ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + ret = ath11k_mac_vdev_stop(arvif); if (ret) ath11k_warn(ab, "failed to stop vdev %i: %d\n", @@ -5278,6 +5298,10 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = false; + if (ab->hw_params.vdev_start_delay && + arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) + ath11k_wmi_vdev_down(ar, arvif->vdev_id); + mutex_unlock(&ar->conf_mutex); } @@ -5878,35 +5902,6 @@ static const struct ieee80211_ops ath11k_ops = { #endif }; -static const struct ieee80211_iface_limit ath11k_if_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 16, - .types = BIT(NL80211_IFTYPE_AP) -#ifdef CONFIG_MAC80211_MESH - | BIT(NL80211_IFTYPE_MESH_POINT) -#endif - }, -}; - -static const struct ieee80211_iface_combination ath11k_if_comb[] = { - { - .limits = ath11k_if_limits, - .n_limits = ARRAY_SIZE(ath11k_if_limits), - .max_interfaces = 16, - .num_different_channels = 1, - .beacon_int_infra_match = true, - .beacon_int_min_gcd = 100, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80), - }, -}; - static void ath11k_mac_update_ch_list(struct ath11k *ar, struct ieee80211_supported_band *band, u32 freq_low, u32 freq_high) @@ -6032,6 +6027,50 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, return 0; } +static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + struct ieee80211_iface_combination *combinations; + struct ieee80211_iface_limit *limits; + int n_limits; + + combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); + if (!combinations) + return -ENOMEM; + + n_limits = 2; + + limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); + if (!limits) + return -ENOMEM; + + limits[0].max = 1; + limits[0].types |= BIT(NL80211_IFTYPE_STATION); + + limits[1].max = 16; + limits[1].types |= BIT(NL80211_IFTYPE_AP); + + if (IS_ENABLED(CONFIG_MAC80211_MESH) && + ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) + limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); + + combinations[0].limits = limits; + combinations[0].n_limits = n_limits; + combinations[0].max_interfaces = 16; + combinations[0].num_different_channels = 1; + combinations[0].beacon_int_infra_match = true; + combinations[0].beacon_int_min_gcd = 100; + combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80); + + ar->hw->wiphy->iface_combinations = combinations; + ar->hw->wiphy->n_iface_combinations = 1; + + return 0; +} + static const u8
ath11k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, @@ -6082,6 +6121,9 @@ static void __ath11k_mac_unregister(struct ath11k *ar) kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); + kfree(ar->hw->wiphy->iface_combinations[0].limits); + kfree(ar->hw->wiphy->iface_combinations); + SET_IEEE80211_DEV(ar->hw, NULL); } @@ -6133,12 +6175,16 @@ static int __ath11k_mac_register(struct ath11k *ar) ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); ath11k_mac_setup_he_cap(ar, cap); + ret = ath11k_mac_setup_iface_combinations(ar); + if (ret) { + ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); + goto err_free_channels; + } + ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; - ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_MESH_POINT); + ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes; ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); @@ -6200,9 +6246,6 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->vif_data_size = sizeof(struct ath11k_vif); ar->hw->sta_data_size = sizeof(struct ath11k_sta); - ar->hw->wiphy->iface_combinations = ath11k_if_comb; - ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb); - wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); @@ -6224,25 +6267,37 @@ static int __ath11k_mac_register(struct ath11k *ar) ret = ieee80211_register_hw(ar->hw); if (ret) { ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); - goto err_free; + goto err_free_if_combs; } + if (!ab->hw_params.supports_monitor) + /* There's a race between calling ieee80211_register_hw() + * and here where the monitor mode is enabled for a little + * while. But that time is so short and in practice it makes + * no difference in real life. + */ + ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); + /* Apply the regd received during initialization */ ret = ath11k_regd_update(ar, true); if (ret) { ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret); - goto err_free; + goto err_free_if_combs; } ret = ath11k_debugfs_register(ar); if (ret) { ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret); - goto err_free; + goto err_free_if_combs; } return 0; -err_free: +err_free_if_combs: + kfree(ar->hw->wiphy->iface_combinations[0].limits); + kfree(ar->hw->wiphy->iface_combinations); + +err_free_channels: kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
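The pci.c hunks below bracket any register access beyond the always-accessible first 4K of BAR0 with an MHI wake/put pair. mhi_device_get_sync() and mhi_device_put() are the real MHI API; the wrapper here is a simplified sketch of the pattern (the driver code additionally checks ATH11K_PCI_FLAG_INIT_DONE and remaps offsets above WINDOW_START, omitted for brevity):

/* sketch: hold an MHI wake vote around a register read */
static u32 pci_read32_sketch(struct ath11k_pci *ab_pci, u32 offset)
{
	bool wake = offset >= ACCESS_ALWAYS_OFF;	/* 4K - 32 = 0xFE0 */
	u32 val;

	if (wake)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	val = ioread32(ab_pci->ab->mem + offset);

	if (wake)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);

	return val;
}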
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index aec0b593c6a7..d7eb6b7160bb 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -24,6 +24,16 @@ #define WINDOW_START 0x80000 #define WINDOW_RANGE_MASK GENMASK(18, 0) +#define TCSR_SOC_HW_VERSION 0x0224 +#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8) +#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) + +/* BAR0 + 4k is always accessible, and no + * need to force wakeup. + * 4K - 32 = 0xFE0 + */ +#define ACCESS_ALWAYS_OFF 0xFE0 + #define QCA6390_DEVICE_ID 0x1101 static const struct pci_device_id ath11k_pci_id_table[] = { @@ -124,6 +134,13 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + /* for offset beyond BAR + 4K - 32, may + * need to wake up MHI to access. + */ + if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + if (offset < WINDOW_START) { iowrite32(value, ab->mem + offset); } else { @@ -132,6 +149,10 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); spin_unlock_bh(&ab_pci->window_lock); } + + if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) @@ -139,6 +160,13 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 val; + /* for offset beyond BAR + 4K - 32, may + * need to wake up MHI to access. + */ + if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + if (offset < WINDOW_START) { val = ioread32(ab->mem + offset); } else { @@ -148,6 +176,10 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) spin_unlock_bh(&ab_pci->window_lock); } + if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ACCESS_ALWAYS_OFF) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); + return val; } @@ -582,6 +614,9 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390; + + ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2, + &cfg->shadow_reg_v2_len); } static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) @@ -727,6 +762,8 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); int ret; + ab_pci->register_window = 0; + clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); ath11k_pci_sw_reset(ab_pci->ab); ret = ath11k_mhi_start(ab_pci); @@ -743,6 +780,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_mhi_stop(ab_pci); + clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); ath11k_pci_force_wake(ab_pci->ab); ath11k_pci_sw_reset(ab_pci->ab); } @@ -771,6 +809,10 @@ static void ath11k_pci_stop(struct ath11k_base *ab) static int ath11k_pci_start(struct ath11k_base *ab) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + ath11k_pci_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); @@ -839,21 +881,11 @@ static int ath11k_pci_probe(struct pci_dev *pdev, { struct ath11k_base *ab; struct ath11k_pci *ab_pci; - enum ath11k_hw_rev hw_rev; + u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor; int ret; dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n"); - switch (pci_dev->device) { - case QCA6390_DEVICE_ID: - hw_rev = ATH11K_HW_QCA6390_HW20; - break; - default: - dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", - pci_dev->device); - return -ENOTSUPP; - } - ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci),
ATH11K_BUS_PCI, &ath11k_pci_bus_params); if (!ab) { @@ -862,7 +894,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev, } ab->dev = &pdev->dev; - ab->hw_rev = hw_rev; pci_set_drvdata(pdev, ab); ab_pci = ath11k_pci_priv(ab); ab_pci->dev_id = pci_dev->device; @@ -878,6 +909,35 @@ static int ath11k_pci_probe(struct pci_dev *pdev, goto err_free_core; } + switch (pci_dev->device) { + case QCA6390_DEVICE_ID: + soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION); + soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, + soc_hw_version); + soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, + soc_hw_version); + + ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n", + soc_hw_version_major, soc_hw_version_minor); + + switch (soc_hw_version_major) { + case 2: + ab->hw_rev = ATH11K_HW_QCA6390_HW20; + break; + default: + dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n", + soc_hw_version_major, soc_hw_version_minor); + ret = -EOPNOTSUPP; + goto err_pci_free_region; + } + break; + default: + dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", + pci_dev->device); + ret = -EOPNOTSUPP; + goto err_pci_free_region; + } + ret = ath11k_pci_enable_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to enable msi: %d\n", ret); @@ -949,10 +1009,17 @@ static void ath11k_pci_remove(struct pci_dev *pdev) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); + + ath11k_core_deinit(ab); + ath11k_mhi_unregister(ab_pci); + + ath11k_pci_free_irq(ab); ath11k_pci_disable_msi(ab_pci); ath11k_pci_free_region(ab_pci); - ath11k_pci_free_irq(ab); + + ath11k_hal_srng_deinit(ab); + ath11k_ce_free_pipes(ab); ath11k_core_free(ab); } diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index f2f280eb8b55..43562f774a37 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -36,6 +36,10 @@ struct ath11k_msi_config { struct ath11k_msi_user *users; }; +enum ath11k_pci_flags { + ATH11K_PCI_FLAG_INIT_DONE, +}; + struct ath11k_pci { struct pci_dev *pdev; struct ath11k_base *ab; @@ -48,6 +52,9 @@ struct ath11k_pci { /* protects register_window above */ spinlock_t window_lock; + + /* enum ath11k_pci_flags */ + unsigned long flags; }; static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index b906b50ee57e..c2b165158225 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -3,6 +3,8 @@ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 
*/ +#include <linux/elf.h> + #include "qmi.h" #include "core.h" #include "debug.h" @@ -1990,6 +1992,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) struct qmi_txn txn = {}; int ret; const u8 *temp; + int bdf_type; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) @@ -2006,6 +2009,13 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) temp = bd.data; remaining = bd.len; + if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) + bdf_type = ATH11K_QMI_BDF_TYPE_ELF; + else + bdf_type = ATH11K_QMI_BDF_TYPE_BIN; + + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type); + while (remaining) { req->valid = 1; req->file_id_valid = 1; @@ -2015,7 +2025,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab) req->seg_id_valid = 1; req->data_valid = 1; req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; - req->bdf_type = ATH11K_QMI_BDF_TYPE_BIN; + req->bdf_type = bdf_type; req->bdf_type_valid = 1; req->end_valid = 1; req->end = 0; @@ -2265,7 +2275,18 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum; } req->shadow_reg_valid = 0; - req->shadow_reg_v2_valid = 0; + + /* set shadow v2 configuration */ + if (ab->hw_params.supports_shadow_regs) { + req->shadow_reg_v2_valid = 1; + req->shadow_reg_v2_len = min_t(u32, + ab->qmi.ce_cfg.shadow_reg_v2_len, + QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01); + memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2, + sizeof(u32) * req->shadow_reg_v2_len); + } else { + req->shadow_reg_v2_valid = 0; + } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp); diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 60e904683165..b0a818f0401b 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -77,7 +77,7 @@ struct ath11k_qmi_ce_cfg { int svc_to_ce_map_len; const u8 *shadow_reg; int shadow_reg_len; - u8 *shadow_reg_v2; + u32 *shadow_reg_v2; int shadow_reg_v2_len; }; diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 0ba80e6f3979..f6a1f0352989 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init) ab = ar->ab; pdev_id = ar->pdev_idx; - spin_lock(&ab->base_lock); + spin_lock_bh(&ab->base_lock); if (init) { /* Apply the regd received during init through @@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init) if (!regd) { ret = -EINVAL; - spin_unlock(&ab->base_lock); + spin_unlock_bh(&ab->base_lock); goto err; } @@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init) if (regd_copy) ath11k_copy_regd(regd, regd_copy); - spin_unlock(&ab->base_lock); + spin_unlock_bh(&ab->base_lock); if (!regd_copy) { ret = -ENOMEM; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 82392bc7123d..8eca92520837 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -6530,7 +6530,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) break; /* TODO: Add remaining events */ default: - ath11k_warn(ab, "Unknown eventid: 0x%x\n", id); + ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); break; } diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index f4c9befb3949..fab14e0a87b9 
100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h @@ -1328,27 +1328,6 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = { {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, }; -static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = { - /* Addr allmodes */ - {0x00004040, 0x0835365e}, - {0x00004040, 0x0008003b}, - {0x00004044, 0x00000000}, -}; - -static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = { - /* Addr allmodes */ - {0x00004040, 0x0831365e}, - {0x00004040, 0x0008003b}, - {0x00004044, 0x00000000}, -}; - -static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = { - /* Addr allmodes */ - {0x00004040, 0x0831265e}, - {0x00004040, 0x0008003b}, - {0x00004044, 0x00000000}, -}; - static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = { /* Addr 5G 2G */ {0x00009814, 0x3400c00f, 0x3400c00f}, diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c index 8441031b667c..2d0780fefd47 100644 --- a/drivers/net/wireless/ath/wcn36xx/pmc.c +++ b/drivers/net/wireless/ath/wcn36xx/pmc.c @@ -31,6 +31,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn, if (!ret) { wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n"); vif_priv->pw_state = WCN36XX_BMPS; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; } else { /* * One of the reasons why HW will not enter BMPS is because @@ -55,6 +56,7 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn, } wcn36xx_smd_exit_bmps(wcn, vif); vif_priv->pw_state = WCN36XX_FULL_POWER; + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 4ac1e4fb7812..3dd28f5fef19 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1426,6 +1426,11 @@ void brcmf_detach(struct device *dev) #endif brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN); + /* make sure primary interface removed last */ + for (i = BRCMF_MAX_IFS - 1; i > -1; i--) { + if (drvr->iflist[i]) + brcmf_remove_interface(drvr->iflist[i], false); + } brcmf_bus_stop(drvr->bus_if); brcmf_fweh_detach(drvr); @@ -1436,12 +1441,6 @@ void brcmf_detach(struct device *dev) drvr->mon_if = NULL; } - /* make sure primary interface removed last */ - for (i = BRCMF_MAX_IFS - 1; i > -1; i--) { - if (drvr->iflist[i]) - brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false); - } - if (drvr->config) { brcmf_p2p_detach(&drvr->config->p2p); brcmf_cfg80211_detach(drvr->config); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index c847062dd393..430d2cca98b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr) { struct brcmf_fweh_info *fweh = &drvr->fweh; - /* cancel the worker */ - cancel_work_sync(&fweh->event_work); - WARN_ON(!list_empty(&fweh->event_q)); - memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); + /* cancel the worker if initialized */ + if (fweh->event_work.func) { + cancel_work_sync(&fweh->event_work); + WARN_ON(!list_empty(&fweh->event_q)); + memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); + } } /** diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 
fbcd1405aeea..85c6fed28f8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -13,6 +13,7 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o +iwlwifi-objs += queue/tx.o iwlwifi-objs += fw/img.o fw/notif-wait.o iwlwifi-objs += fw/dbg.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index efe427049a6e..60e0640d07dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -57,7 +57,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 56 +#define IWL_22000_UCODE_API_MAX 57 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -89,6 +89,9 @@ #define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" #define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-" #define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-" +#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-" +#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-" +#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" @@ -118,6 +121,12 @@ IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" #define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \ IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -335,14 +344,32 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { .bisr_workaround = 1, }; +const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .mq_rx_supported = true, + .use_tfh = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000 +}; + +const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; -const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz"; +const char iwl_ma_name[] = "Intel(R) Wi-Fi 6"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; const char iwl_ax200_killer_1650x_name[] = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)"; +const char iwl_ax201_killer_1650s_name[] = + "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; +const char iwl_ax201_killer_1650i_name[] = + "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -539,7 +566,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { - .name = "Intel(R) Wi-Fi 6 AX211 160MHz", + .name = iwl_ax211_name, .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, @@ 
-547,7 +574,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { }; const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { - .name = "Intel(R) Wi-Fi 6 AX211 160MHz", + .name = iwl_ax211_name, .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, @@ -565,7 +592,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { }; const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { - .name = "Intel(R) Wi-Fi 6 AX411 160MHz", + .name = iwl_ax411_name, .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, @@ -573,7 +600,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { }; const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { - .name = "Intel(R) Wi-Fi 6 AX411 160MHz", + .name = iwl_ax411_name, .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, @@ -583,7 +610,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { }; const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = { - .name = "Intel(R) Wi-Fi 6 AX411 160MHz", + .name = iwl_ax411_name, .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, @@ -591,13 +618,35 @@ const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = { }; const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = { - .name = "Intel(R) Wi-Fi 6 AX211 160MHz", + .name = iwl_ax211_name, .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE, .uhb_supported = true, IWL_DEVICE_AX210, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; +const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = { + .name = iwl_ax201_name, + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = { + .fw_name_pre = IWL_MA_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = { + .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -612,3 +661,6 @@ MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index dc769b580431..e93656e461ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -118,8 +118,8 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object); * method (DSM) interface. The returned acpi object must be freed by calling * function. 
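 *
 * Example (a minimal sketch of the underlying pattern via the generic ACPI
 * helper; my_guid, func and value are illustrative names, not the driver's
 * real GUID or DSM function index). Revision 0, no argv4; the caller frees
 * the returned object:
 *
 *	union acpi_object *obj;
 *
 *	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), my_guid, 0, func, NULL);
 *	if (obj) {
 *		if (obj->type == ACPI_TYPE_INTEGER)
 *			value = obj->integer.value;
 *		ACPI_FREE(obj);
 *	}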
*/ -void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, - union acpi_object *args) +static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, + union acpi_object *args) { union acpi_object *obj; @@ -400,9 +400,9 @@ out_free: } IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); -int iwl_sar_set_profile(union acpi_object *table, - struct iwl_sar_profile *profile, - bool enabled) +static int iwl_sar_set_profile(union acpi_object *table, + struct iwl_sar_profile *profile, + bool enabled) { int i; @@ -418,18 +418,13 @@ int iwl_sar_set_profile(union acpi_object *table, return 0; } -IWL_EXPORT_SYMBOL(iwl_sar_set_profile); -int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS], - int prof_a, int prof_b) +static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, + __le16 *per_chain, u32 n_subbands, + int prof_a, int prof_b) { - int i, j, idx; int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; - - BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2); - BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS != - ACPI_SAR_TABLE_SIZE); + int i, j, idx; for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) { struct iwl_sar_profile *prof; @@ -461,9 +456,9 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, "SAR EWRD: chain %d profile index %d\n", i, profs[i]); IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); - for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) { - idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j; - per_chain_restriction[i][j] = + for (j = 0; j < n_subbands; j++) { + idx = i * ACPI_SAR_NUM_SUB_BANDS + j; + per_chain[i * n_subbands + j] = cpu_to_le16(prof->table[idx]); IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", j, prof->table[idx]); @@ -472,6 +467,23 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, return 0; } + +int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, + __le16 *per_chain, u32 n_tables, u32 n_subbands, + int prof_a, int prof_b) +{ + int i, ret = 0; + + for (i = 0; i < n_tables; i++) { + ret = iwl_sar_fill_table(fwrt, + &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS], + n_subbands, prof_a, prof_b); + if (ret) + break; + } + + return ret; +} IWL_EXPORT_SYMBOL(iwl_sar_select_profile); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) @@ -632,25 +644,8 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); -int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, - struct iwl_host_cmd *cmd) -{ - struct iwl_geo_tx_power_profiles_resp *resp; - int ret; - - resp = (void *)cmd->resp_pkt->data; - ret = le32_to_cpu(resp->profile_idx); - if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) { - ret = -EIO; - IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret); - } - - return ret; -} -IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile); - int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) + struct iwl_per_chain_offset *table, u32 n_bands) { int ret, i, j; @@ -666,16 +661,11 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, return -ENOENT; } - BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * - ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); - - BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); - for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { struct iwl_per_chain_offset *chain = (struct iwl_per_chain_offset *)&table[i]; - for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) { + for (j = 0; j < n_bands; j++) { u8 *value; value = &fwrt->geo_profiles[i].values[j * diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 0ada9eddb8b1..71ef8647d7b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -89,6 +89,7 @@ #define ACPI_SAR_NUM_CHAIN_LIMITS 2 #define ACPI_SAR_NUM_SUB_BANDS 5 +#define ACPI_SAR_NUM_TABLES 1 #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ @@ -107,10 +108,10 @@ #define ACPI_WGDS_NUM_BANDS 2 #define ACPI_WGDS_TABLE_SIZE 3 -#define ACPI_PPAG_NUM_CHAINS 2 -#define ACPI_PPAG_NUM_SUB_BANDS 5 -#define ACPI_PPAG_WIFI_DATA_SIZE ((ACPI_PPAG_NUM_CHAINS * \ - ACPI_PPAG_NUM_SUB_BANDS) + 3) +#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \ + IWL_NUM_SUB_BANDS) + 3) +#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ + IWL_NUM_SUB_BANDS_V2) + 3) /* PPAG gain value bounds in 1/8 dBm */ #define ACPI_PPAG_MIN_LB -16 @@ -133,15 +134,26 @@ enum iwl_dsm_funcs_rev_0 { DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, }; +enum iwl_dsm_values_srd { + DSM_VALUE_SRD_ACTIVE, + DSM_VALUE_SRD_PASSIVE, + DSM_VALUE_SRD_DISABLE, + DSM_VALUE_SRD_MAX +}; + +enum iwl_dsm_values_indonesia { + DSM_VALUE_INDONESIA_DISABLE, + DSM_VALUE_INDONESIA_ENABLE, + DSM_VALUE_INDONESIA_RESERVED, + DSM_VALUE_INDONESIA_MAX +}; + #ifdef CONFIG_ACPI struct iwl_fw_runtime; void *iwl_acpi_get_object(struct device *dev, acpi_string method); -void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, - union acpi_object *args); - int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, @@ -171,12 +183,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev); */ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); -int iwl_sar_set_profile(union acpi_object *table, - struct iwl_sar_profile *profile, - bool enabled); - int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS], + __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt); @@ -187,11 +195,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); -int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, - struct iwl_host_cmd *cmd); - int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table); + struct iwl_per_chain_offset *table, u32 n_bands); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array, int *black_list_size); @@ -237,15 +242,8 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) return -ENOENT; } -static inline int iwl_sar_set_profile(union acpi_object *table, - struct iwl_sar_profile *profile, - bool enabled) -{ - return -ENOENT; -} - static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, - __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS], + __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b) { return -ENOENT; @@ -271,18 +269,6 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) return false; } -static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt, - struct iwl_host_cmd *cmd) -{ - return -ENOENT; -} - -static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, - struct iwl_per_chain_offset_group *table) -{ - return -ENOENT; -} - static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array, 
int *black_list_size) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index df1bd0d2450e..a1cac47395bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +30,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -129,19 +128,31 @@ struct iwl_umac_alive { struct iwl_umac_debug_addrs dbg_ptrs; } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ -struct mvm_alive_resp_v3 { +struct iwl_sku_id { + __le32 data[3]; +} __packed; /* SKU_ID_API_S_VER_1 */ + +struct iwl_alive_ntf_v3 { __le16 status; __le16 flags; struct iwl_lmac_alive lmac_data; struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_3 */ +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_alive_ntf_v4 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_4 */ -struct mvm_alive_resp { +struct iwl_alive_ntf_v5 { __le16 status; __le16 flags; struct iwl_lmac_alive lmac_data[2]; struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_4 */ + struct iwl_sku_id sku_id; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */ /** * enum iwl_extended_cfg_flag - commands driver may send before diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 4f46f3ed8794..14975a7e7203 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -104,11 +104,12 @@ enum iwl_mvm_command_groups { */ enum iwl_legacy_cmds { /** - * @MVM_ALIVE: + * @UCODE_ALIVE_NTFY: * Alive data from the firmware, as described in - * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. + * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or + * &struct iwl_alive_ntf_v5. */ - MVM_ALIVE = 0x1, + UCODE_ALIVE_NTFY = 0x1, /** * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. @@ -431,8 +432,7 @@ enum iwl_legacy_cmds { /** * @REDUCE_TX_POWER_CMD: - * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4 - * or &struct iwl_dev_tx_power_cmd + * &struct iwl_dev_tx_power_cmd */ REDUCE_TX_POWER_CMD = 0x9f, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index c4562e1f8d18..5db301a6a312 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -451,10 +451,15 @@ union iwl_all_tsc_rsc { struct iwl_aes_rsc_tsc aes; }; /* ALL_TSC_RSC_API_S_VER_2 */ -struct iwl_wowlan_rsc_tsc_params_cmd { +struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 { union iwl_all_tsc_rsc all_tsc_rsc; } __packed; /* ALL_TSC_RSC_API_S_VER_2 */ +struct iwl_wowlan_rsc_tsc_params_cmd { + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params; + __le32 sta_id; +} __packed; /* ALL_TSC_RSC_API_S_VER_4 */ + #define IWL_MIC_KEY_SIZE 8 struct iwl_mic_keys { u8 tx[IWL_MIC_KEY_SIZE]; @@ -469,17 +474,26 @@ struct iwl_p1k_cache { #define IWL_NUM_RX_P1K_CACHE 2 -struct iwl_wowlan_tkip_params_cmd { +struct iwl_wowlan_tkip_params_cmd_ver_1 { struct iwl_mic_keys mic_keys; struct iwl_p1k_cache tx; struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; } __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ +struct iwl_wowlan_tkip_params_cmd { + struct iwl_mic_keys mic_keys; + struct iwl_p1k_cache tx; + struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; + struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; + u8 reversed[2]; + __le32 sta_id; +} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_2 */ + #define IWL_KCK_MAX_SIZE 32 #define IWL_KEK_MAX_SIZE 32 -struct iwl_wowlan_kek_kck_material_cmd { +struct iwl_wowlan_kek_kck_material_cmd_v2 { u8 kck[IWL_KCK_MAX_SIZE]; u8 kek[IWL_KEK_MAX_SIZE]; __le16 kck_len; @@ -487,6 +501,18 @@ struct iwl_wowlan_kek_kck_material_cmd { __le64 replay_ctr; } __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ +struct iwl_wowlan_kek_kck_material_cmd_v3 { + u8 kck[IWL_KCK_MAX_SIZE]; + u8 kek[IWL_KEK_MAX_SIZE]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; + __le32 akm; + __le32 gtk_cipher; + __le32 igtk_cipher; + __le32 bigtk_cipher; +} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */ + #define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 enum iwl_wowlan_rekey_status { @@ -525,7 +551,7 @@ struct iwl_wowlan_gtk_status_v1 { u8 reserved[3]; u8 decrypt_key[16]; u8 tkip_mic_key[8]; - struct iwl_wowlan_rsc_tsc_params_cmd rsc; + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ #define WOWLAN_KEY_MAX_SIZE 32 @@ -550,7 +576,7 @@ struct iwl_wowlan_gtk_status { u8 key_flags; u8 reserved[2]; u8 tkip_mic_key[8]; - struct iwl_wowlan_rsc_tsc_params_cmd rsc; + struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */ #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1)) @@ -635,7 +661,7 @@ struct iwl_wowlan_status_v7 { } __packed; /* WOWLAN_STATUSES_API_S_VER_7 */ /** - * struct iwl_wowlan_status - WoWLAN status + * struct iwl_wowlan_status_v9 - WoWLAN status (version 9) * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter @@ -653,7 +679,7 @@ struct iwl_wowlan_status_v7 { * @reserved: unused * @wake_packet: wakeup packet */ -struct 
iwl_wowlan_status { +struct iwl_wowlan_status_v9 { struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; @@ -671,6 +697,44 @@ struct iwl_wowlan_status { u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_API_S_VER_9 */ +/** + * struct iwl_wowlan_status - WoWLAN status + * @gtk: GTK data + * @igtk: IGTK data + * @bigtk: BIGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched pattern + * @non_qos_seq_ctr: non-QoS sequence counter to use next + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @tid_tear_down: bit mask of TIDs whose BA sessions were closed + * in suspend state + * @reserved: reserved + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_status { + struct iwl_wowlan_gtk_status gtk[1]; + struct iwl_wowlan_igtk_status igtk[1]; + struct iwl_wowlan_igtk_status bigtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + u8 tid_tear_down; + u8 reserved[3]; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_11 */ + static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk) { return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 74ac65bd545a..95ada51d3f9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -135,6 +135,25 @@ struct iwl_fw_ini_region_err_table { } __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */ /** + * struct iwl_fw_ini_region_special_device_memory - special device memory + * + * Configuration to read a special memory + * + * @type: type of the special memory + * @version: version of the special memory + * @base_addr: base address of the special memory + * @size: size of the special memory + * @offset: offset to add to &base_addr + */ +struct iwl_fw_ini_region_special_device_memory { + __le16 type; + __le16 version; + __le32 base_addr; + __le32 size; + __le32 offset; +} __packed; /* FW_TLV_DEBUG_REGION_SPECIAL_DEVICE_ADDR_API_S_VER_1 */ + +/** * struct iwl_fw_ini_region_internal_buffer - internal buffer region data * * Configuration to read internal monitor buffer @@ -185,6 +204,7 @@ struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_region_fifos fifos; struct iwl_fw_ini_region_err_table err_table; struct iwl_fw_ini_region_internal_buffer internal_buffer; + struct iwl_fw_ini_region_special_device_memory special_mem; __le32 dram_alloc_id; __le32 tlv_mask; }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */ @@ -281,6 +301,7 @@ struct iwl_fw_ini_hcmd_tlv { * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration + * @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation
meant for internal SMEM in D3 * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { @@ -288,6 +309,7 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, + IWL_FW_INI_ALLOCATION_ID_INTERNAL, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ @@ -327,6 +349,7 @@ enum iwl_fw_ini_buffer_location { * @IWL_FW_INI_REGION_CSR: CSR registers * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config + * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -347,6 +370,7 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_CSR, IWL_FW_INI_REGION_DRAM_IMR, IWL_FW_INI_REGION_PCI_IOSF_CONFIG, + IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ @@ -362,13 +386,13 @@ enum iwl_fw_ini_region_type { * @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert * @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error * @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang - * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION: DHC cmd response and notif + * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: DHC cmd response and notif * @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification. * data field holds id and group * @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point * @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant * intervals. data field holds the interval time in msec - * @IWL_FW_INI_TIME_POINT_WDG_TIMEOUT: watchdog timeout + * @IWL_FW_INI_TIME_POINT_RESERVED: reserved * @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused * @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable @@ -395,11 +419,11 @@ enum iwl_fw_ini_time_point { IWL_FW_INI_TIME_POINT_FW_ASSERT, IWL_FW_INI_TIME_POINT_FW_HW_ERROR, IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG, - IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION, + IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION, IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, IWL_FW_INI_TIME_POINT_USER_TRIGGER, IWL_FW_INI_TIME_POINT_PERIODIC, - IWL_FW_INI_TIME_POINT_WDG_TIMEOUT, + IWL_FW_INI_TIME_POINT_RESERVED, IWL_FW_INI_TIME_POINT_HOST_ASSERT, IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 1df2e497fabf..465a8e3974e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -321,12 +321,54 @@ struct iwl_tof_responder_config_cmd { * data (if exists) follows, and then 0-padding again to complete a * 4-multiple long buffer. */ -struct iwl_tof_responder_dyn_config_cmd { +struct iwl_tof_responder_dyn_config_cmd_v2 { __le32 lci_len; __le32 civic_len; u8 lci_civic[]; } __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ +#define IWL_LCI_MAX_SIZE 160 +#define IWL_CIVIC_MAX_SIZE 160 +#define HLTK_11AZ_LEN 32 + +/** + * enum iwl_responder_dyn_cfg_valid_flags - valid flags for dyn_config_cmd + * @IWL_RESPONDER_DYN_CFG_VALID_LCI: LCI data is valid + * @IWL_RESPONDER_DYN_CFG_VALID_CIVIC: Civic data is valid + * @IWL_RESPONDER_DYN_CFG_VALID_PASN_STA: the pasn_addr, HLTK and cipher fields + * are valid.
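 *
 * Example (hedged sketch of filling the v3 command defined below; lci,
 * lci_len, civic and civic_len are illustrative caller-supplied buffers,
 * bounded by IWL_LCI_MAX_SIZE and IWL_CIVIC_MAX_SIZE respectively):
 *
 *	struct iwl_tof_responder_dyn_config_cmd cmd = {};
 *
 *	cmd.valid_flags = IWL_RESPONDER_DYN_CFG_VALID_LCI |
 *			  IWL_RESPONDER_DYN_CFG_VALID_CIVIC;
 *	cmd.lci_len = lci_len;
 *	memcpy(cmd.lci_buf, lci, lci_len);
 *	cmd.civic_len = civic_len;
 *	memcpy(cmd.civic_buf, civic, civic_len);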
+ */ +enum iwl_responder_dyn_cfg_valid_flags { + IWL_RESPONDER_DYN_CFG_VALID_LCI = BIT(0), + IWL_RESPONDER_DYN_CFG_VALID_CIVIC = BIT(1), + IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = BIT(2), +}; + +/** + * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings + * @cipher: The negotiated cipher. see &enum iwl_location_cipher. + * @valid_flags: flags indicating which fields in the command are valid. see + * &enum iwl_responder_dyn_cfg_valid_flags. + * @lci_len: length of the LCI data in bytes + * @civic_len: length of the Civic data in bytes + * @lci_buf: the LCI buffer + * @civic_buf: the Civic buffer + * @hltk_buf: HLTK for secure LTF bits generation for the specified station + * @addr: mac address of the station for which to use the HLTK + * @reserved: for alignment + */ +struct iwl_tof_responder_dyn_config_cmd { + u8 cipher; + u8 valid_flags; + u8 lci_len; + u8 civic_len; + u8 lci_buf[IWL_LCI_MAX_SIZE]; + u8 civic_buf[IWL_LCI_MAX_SIZE]; + u8 hltk_buf[HLTK_11AZ_LEN]; + u8 addr[ETH_ALEN]; + u8 reserved[2]; +} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_3 */ + /** * struct iwl_tof_range_req_ext_cmd - extended range req for WLS * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF @@ -507,7 +549,6 @@ enum iwl_location_bw { IWL_LOCATION_BW_80MHZ, }; -#define HLTK_11AZ_LEN 32 #define TK_11AZ_LEN 32 #define LOCATION_BW_POS 4 @@ -552,15 +593,19 @@ struct iwl_tof_range_req_ap_entry_v4 { * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128 * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128 * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256 + * @IWL_LOCATION_CIPHER_INVALID: security is not used. + * @IWL_LOCATION_CIPHER_MAX: maximum value for this enum. */ enum iwl_location_cipher { IWL_LOCATION_CIPHER_CCMP_128, IWL_LOCATION_CIPHER_GCMP_128, IWL_LOCATION_CIPHER_GCMP_256, + IWL_LOCATION_CIPHER_INVALID, + IWL_LOCATION_CIPHER_MAX, }; /** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * struct iwl_tof_range_req_ap_entry_v6 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. @@ -588,7 +633,7 @@ enum iwl_location_cipher { * @beacon_interval: beacon interval of the AP in TUs. Only required if * &IWL_INITIATOR_AP_FLAGS_TB is set. */ -struct iwl_tof_range_req_ap_entry { +struct iwl_tof_range_req_ap_entry_v6 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; @@ -607,6 +652,61 @@ struct iwl_tof_range_req_ap_entry { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */ /** + * struct iwl_tof_range_req_ap_entry_v7 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. 
Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + */ +struct iwl_tof_range_req_ap_entry_v7 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ + +/** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) @@ -772,7 +872,7 @@ struct iwl_tof_range_req_cmd_v8 { } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */ /** - * struct iwl_tof_range_req_cmd - start measurement cmd + * struct iwl_tof_range_req_cmd_v9 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response @@ -787,7 +887,7 @@ struct iwl_tof_range_req_cmd_v8 { * TSF of this mac id. 0xff to disable TSF reporting. * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ -struct iwl_tof_range_req_cmd { +struct iwl_tof_range_req_cmd_v9 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; @@ -796,9 +896,37 @@ struct iwl_tof_range_req_cmd { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */ +/** + * struct iwl_tof_range_req_cmd_v11 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. 
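 *
 * Since the v9 and v11 layouts coexist, the host command must be sized by
 * the firmware's API version. A sketch, assuming a cmd_ver value already
 * looked up from the firmware TLVs (the lookup itself is outside this hunk):
 *
 *	size_t cmd_len = cmd_ver >= 11 ?
 *		sizeof(struct iwl_tof_range_req_cmd_v11) :
 *		sizeof(struct iwl_tof_range_req_cmd_v9);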
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. + */ +struct iwl_tof_range_req_cmd_v11 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the @@ -960,7 +1088,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 { } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */ /** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * struct iwl_tof_range_rsp_ap_entry_ntfy_v5 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. @@ -992,7 +1120,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 { * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy. * @reserved: for alignment */ -struct iwl_tof_range_rsp_ap_entry_ntfy { +struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; @@ -1017,6 +1145,69 @@ struct iwl_tof_range_rsp_ap_entry_ntfy { } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */ /** + * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response) + * @bssid: BSSID of the AP + * @measure_status: current APs measurement status, one of + * &enum iwl_tof_entry_status. + * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [pSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [pSec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @last_burst: 1 if no more FTM sessions are scheduled for this responder + * @refusal_period: refusal period in case of + * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + * @start_tsf: measurement start time in TSF of the mac specified in the range + * request + * @rx_rate_n_flags: rate and flags of the last FTM frame received from this + * responder + * @tx_rate_n_flags: rate and flags of the last ack sent to this responder + * @t2t3_initiator: as calculated from the algo in the initiator + * @t1t4_responder: as calculated from the algo in the responder + * @common_calib: Calib val that was used in for this AP measurement + * @specific_calib: val that was used in for this AP measurement + * @papd_calib_output: The result of the tof papd calibration that was injected + * into the algorithm. + * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy. + * @reserved: for alignment + * @rx_pn: the last PN used for this responder Rx in case PMF is configured in + * LE byte order. + * @tx_pn: the last PN used for this responder Tx in case PMF is configured in + * LE byte order. 
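 *
 * Example (sketch): mac80211 keeps PNs most-significant-byte first in
 * struct ieee80211_key_seq, so folding a reported PN back into the key
 * sequence takes a reversing copy; res is an illustrative pointer to this
 * entry, seq a CCMP key sequence:
 *
 *	int i;
 *
 *	for (i = 0; i < IEEE80211_CCMP_PN_LEN; i++)
 *		seq.ccmp.pn[i] = res->tx_pn[IEEE80211_CCMP_PN_LEN - 1 - i];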
+ */ +struct iwl_tof_range_rsp_ap_entry_ntfy_v6 { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 rx_rate_n_flags; + __le32 tx_rate_n_flags; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; + u8 rttConfidence; + u8 reserved[3]; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; +} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6 */ + +/** * enum iwl_tof_response_status - tof response status * * @IWL_TOF_RESPONSE_SUCCESS: successful range. @@ -1066,21 +1257,37 @@ struct iwl_tof_range_rsp_ntfy_v6 { } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */ /** - * struct iwl_tof_range_rsp_ntfy - ranging response notification + * struct iwl_tof_range_rsp_ntfy_v7 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. * @reserved: reserved * @ap: per-AP data */ -struct iwl_tof_range_rsp_ntfy { +struct iwl_tof_range_rsp_ntfy_v7 { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; - struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */ +/** + * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification + * @request_id: A Token ID of the corresponding Range request + * @num_of_aps: Number of APs results + * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. + * @reserved: reserved + * @ap: per-AP data + */ +struct iwl_tof_range_rsp_ntfy_v8 { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8 */ + #define IWL_MVM_TOF_MCSI_BUF_SIZE (245) /** * struct iwl_tof_mcsi_notif - used for debug diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 73fb0030c496..afc3eccfa718 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -5,9 +5,8 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,9 +26,8 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -188,9 +186,17 @@ struct iwl_mac_data_ibss { /** * enum iwl_mac_data_policy - policy of the data path for this MAC * @TWT_SUPPORTED: twt is supported + * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to + * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT + * early termination detection. 
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule + * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w) */ enum iwl_mac_data_policy { - TWT_SUPPORTED = BIT(0), + TWT_SUPPORTED = BIT(0), + MORE_DATA_ACK_SUPPORTED = BIT(1), + FLEXIBLE_TWT_SUPPORTED = BIT(2), + PROTECTED_TWT_SUPPORTED = BIT(3), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index b833b80ea3d6..e6a069683462 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -181,15 +179,37 @@ struct iwl_phy_context_cmd_tail { * @ci: channel info * @tail: command tail */ -struct iwl_phy_context_cmd { +struct iwl_phy_context_cmd_v1 { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_1 */ + /* PHY_CONTEXT_DATA_API_S_VER_3 */ __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @lmac_id: the lmac id the phy context belongs to + * @ci: channel info + * @rxchain_info: ??? + * @dsp_cfg_flags: set to 0 + * @reserved: reserved to align to 64 bit + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_3 */ + struct iwl_fw_channel_info ci; + __le32 lmac_id; + __le32 rxchain_info; + __le32 dsp_cfg_flags; + __le32 reserved; +} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 8991ddffbf5e..0debca6dd037 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -214,6 +214,15 @@ struct iwl_dts_measurement_notif_v2 { } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ /** + * struct iwl_dts_measurement_resp - measurements response + * + * @temp: the measured temperature + */ +struct iwl_dts_measurement_resp { + __le32 temp; +} __packed; /* CMD_DTS_MEASUREMENT_RSP_API_S_VER_1 */ + +/** * struct ct_kill_notif - CT-kill entry notification * * @temperature: the current temperature in celsius diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 6e1b9b21904e..4e6ad1793d0a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -329,48 +329,56 @@ enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */; +#define IWL_NUM_CHAIN_TABLES 1 +#define IWL_NUM_CHAIN_TABLES_V2 2 #define IWL_NUM_CHAIN_LIMITS 2 #define IWL_NUM_SUB_BANDS 5 +#define IWL_NUM_SUB_BANDS_V2 11 /** - * struct iwl_dev_tx_power_cmd - TX power reduction command + * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBms. 
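 *
 * Example (sketch): the 1/8 dBm encoding means a whole-dBm limit scales by
 * eight and clamps to IWL_DEV_MAX_TX_POWER; txpower_dbm is an illustrative
 * input, cmd an instance of the multiversion command defined below:
 *
 *	cmd.common.pwr_restriction =
 *		cpu_to_le16(min_t(u32, txpower_dbm * 8, IWL_DEV_MAX_TX_POWER));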
* @dev_24: device TX power restriction in 1/8 dBms * @dev_52_low: device TX power restriction upper band - low * @dev_52_high: device TX power restriction upper band - high - * @per_chain_restriction: per chain restrictions */ -struct iwl_dev_tx_power_cmd_v3 { +struct iwl_dev_tx_power_common { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; __le16 dev_24; __le16 dev_52_low; __le16 dev_52_high; - __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; +}; + +/** + * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3 + * @per_chain: per chain restrictions + */ +struct iwl_dev_tx_power_cmd_v3 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ #define IWL_DEV_MAX_TX_POWER 0x7FFF /** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * @v3: version 3 of the command, embedded here for easier software handling + * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4 + * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { - /* v4 is just an extension of v3 - keep this here */ - struct iwl_dev_tx_power_cmd_v3 v3; + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ /** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * @v3: version 3 of the command, embedded here for easier software handling + * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5 + * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: is per_chain_restriction has changed @@ -381,16 +389,56 @@ struct iwl_dev_tx_power_cmd_v4 { * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ -struct iwl_dev_tx_power_cmd { - /* v5 is just an extension of v3 - keep this here */ - struct iwl_dev_tx_power_cmd_v3 v3; +struct iwl_dev_tx_power_cmd_v5 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */ +/** + * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6 + * @per_chain: per chain restrictions + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. + * @per_chain_restriction_changed: whether per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values.
relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + */ +struct iwl_dev_tx_power_cmd_v6 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; +} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */ + +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) + * @common: common part of the command + * @v3: version 3 part of the command + * @v4: version 4 part of the command + * @v5: version 5 part of the command + * @v6: version 6 part of the command + */ +struct iwl_dev_tx_power_cmd { + struct iwl_dev_tx_power_common common; + union { + struct iwl_dev_tx_power_cmd_v3 v3; + struct iwl_dev_tx_power_cmd_v4 v4; + struct iwl_dev_tx_power_cmd_v5 v5; + struct iwl_dev_tx_power_cmd_v6 v6; + }; +}; + #define IWL_NUM_GEO_PROFILES 3 +#define IWL_NUM_BANDS_PER_CHAIN_V1 2 +#define IWL_NUM_BANDS_PER_CHAIN_V2 3 /** * enum iwl_geo_per_chain_offset_operation - type of operation @@ -414,11 +462,6 @@ struct iwl_per_chain_offset { u8 chain_b; } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ -struct iwl_per_chain_offset_group { - struct iwl_per_chain_offset lb; - struct iwl_per_chain_offset hb; -} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ - /** * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation @@ -426,20 +469,38 @@ struct iwl_per_chain_offset_group { */ struct iwl_geo_tx_power_profiles_cmd_v1 { __le32 ops; - struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; } __packed; /* GEO_TX_POWER_LIMIT_VER_1 */ /** - * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd. + * struct iwl_geo_tx_power_profile_cmd_v2 - struct for GEO_TX_POWER_LIMIT cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. + * @table_revision: BIOS table revision. + */ +struct iwl_geo_tx_power_profiles_cmd_v2 { + __le32 ops; + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; + __le32 table_revision; +} __packed; /* GEO_TX_POWER_LIMIT_VER_2 */ + +/** + * struct iwl_geo_tx_power_profile_cmd_v3 - struct for GEO_TX_POWER_LIMIT cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: BIOS table revision. */ -struct iwl_geo_tx_power_profiles_cmd { +struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; - struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; + struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; -} __packed; /* GEO_TX_POWER_LIMIT */ +} __packed; /* GEO_TX_POWER_LIMIT_VER_3 */ + +union iwl_geo_tx_power_profiles_cmd { + struct iwl_geo_tx_power_profiles_cmd_v1 v1; + struct iwl_geo_tx_power_profiles_cmd_v2 v2; + struct iwl_geo_tx_power_profiles_cmd_v3 v3; +}; /** * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd @@ -450,16 +511,26 @@ struct iwl_geo_tx_power_profiles_resp { } __packed; /* GEO_TX_POWER_LIMIT_RESP */ /** - * struct iwl_ppag_table_cmd - struct for PER_PLATFORM_ANT_GAIN_CMD cmd. 
+ * union iwl_ppag_table_cmd - union for all versions of PPAG command + * @v1: version 1, table revision = 0 + * @v2: version 2, table revision = 1 + * * @enabled: 1 if PPAG is enabled, 0 otherwise * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ -struct iwl_ppag_table_cmd { - __le32 enabled; - s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; - s8 reserved[2]; -} __packed; /* PER_PLATFORM_ANT_GAIN_CMD */ +union iwl_ppag_table_cmd { + struct { + __le32 enabled; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; + s8 reserved[2]; + } v1; + struct { + __le32 enabled; + s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + s8 reserved[2]; + } v2; +} __packed; /** * struct iwl_beacon_filter_cmd diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 4347be6491e9..73b839c3cac1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -486,6 +486,13 @@ enum { #define RATE_MCS_HE_106T_POS 28 #define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) +/* Bit 30-31: (1) RTS, (2) CTS */ +#define RATE_MCS_RTS_REQUIRED_POS (30) +#define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS) + +#define RATE_MCS_CTS_REQUIRED_POS (31) +#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS) + /* Link Quality definitions */ /* # entries in rate scale table to support Tx retries */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index b8b36a4f9eb9..8a8a204bfe26 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -308,17 +308,11 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), -}; -enum iwl_rx_mpdu_hash_filter { - IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f, - IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0, -}; + IWL_RX_MPDU_STATUS_KEY = 0x3f0000, + IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22), -enum iwl_rx_mpdu_sta_id_flags { - IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f, - IWL_RX_MPDU_SIF_RRF_ABORT = 0x20, - IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0, + IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000, }; #define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f @@ -560,7 +554,11 @@ struct iwl_rx_mpdu_desc_v3 { /** * @raw_xsum: raw xsum value */ - __le32 raw_xsum; + __be16 raw_xsum; + /** + * @reserved_xsum: reserved high bits in the raw checksum + */ + __le16 reserved_xsum; /* DW11 */ /** * @rate_n_flags: RX rate/flags encoding @@ -668,15 +666,8 @@ struct iwl_rx_mpdu_desc { /** * @status: &enum iwl_rx_mpdu_status */ - __le16 status; - /** - * @hash_filter: hash filter value - */ - u8 hash_filter; - /** - * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags - */ - u8 sta_id_flags; + 
__le32 status; + /* DW6 */ /** * @reorder_data: &enum iwl_rx_mpdu_reorder_data diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 82d59b5a5f8c..de2e2ca7a3ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -5,9 +5,8 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,9 +26,8 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -61,6 +59,7 @@ #ifndef __iwl_fw_api_tx_h__ #define __iwl_fw_api_tx_h__ +#include <linux/ieee80211.h> /** * enum iwl_tx_flags - bitmasks for tx_flags in TX command diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 7ea55cfdd8a8..ab4a8b942c81 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1507,6 +1507,27 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_region_special_device_memory *special_mem = + ®->special_mem; + + struct iwl_fw_ini_error_dump_range *range = range_ptr; + u32 addr = le32_to_cpu(special_mem->base_addr) + + le32_to_cpu(special_mem->offset); + + range->internal_base_addr = cpu_to_le32(addr); + range->range_data_size = special_mem->size; + iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, + le32_to_cpu(special_mem->size)); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, int idx) @@ -1636,6 +1657,21 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, return dump->ranges; } +static void * +iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_special_device_memory *dump = data; + + dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); + dump->type = reg->special_mem.type; + dump->version = reg->special_mem.version; + + return dump->ranges; +} + static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1827,6 +1863,20 @@ iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt, } static u32 +iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 size = le32_to_cpu(reg->special_mem.size); + + if (size) + size += sizeof(struct 
iwl_fw_ini_special_device_memory) + + sizeof(struct iwl_fw_ini_error_dump_range); + + return size; +} + +static u32 iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -2125,6 +2175,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_config_iter, }, + [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = { + .get_num_of_ranges = iwl_dump_ini_single_range, + .get_size = iwl_dump_ini_special_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, + .fill_range = iwl_dump_ini_special_mem_iter, + }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 72bfc64580ab..cb40f509ab61 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -495,6 +495,20 @@ struct iwl_fw_ini_monitor_dump { } __packed; /** + * struct iwl_fw_ini_special_device_memory - special device memory + * @header: header of the region + * @type: type of special memory + * @version: struct special memory version + * @ranges: the memory ranges of this region + */ +struct iwl_fw_ini_special_device_memory { + struct iwl_fw_ini_error_dump_header header; + __le16 type; + __le16 version; + struct iwl_fw_ini_error_dump_range ranges[]; +} __packed; + +/** * struct iwl_fw_error_dump_paging - content of the UMAC's image page * block on DRAM * @index: the index of the page block diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 1fb45fd30ffa..044b9d4ca4d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -405,8 +405,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * to report the CSI information with (certain) RX frames * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder - * * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload + * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -451,6 +451,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52, IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, + IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c index de8cff463dbe..c2a4e60518bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -25,7 +25,7 @@ * * BSD LICENSE * - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -57,22 +57,25 @@ #include "img.h" -u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd) +u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def) { const struct iwl_fw_cmd_version *entry; unsigned int i; if (!fw->ucode_capa.cmd_versions || !fw->ucode_capa.n_cmd_versions) - return IWL_FW_CMD_VER_UNKNOWN; + return def; entry = fw->ucode_capa.cmd_versions; for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) { - if (entry->group == grp && entry->cmd == cmd) + if (entry->group == grp && entry->cmd == cmd) { + if (entry->cmd_ver == IWL_FW_CMD_VER_UNKNOWN) + return def; return entry->cmd_ver; + } } - return IWL_FW_CMD_VER_UNKNOWN; + return def; } EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver); @@ -97,3 +100,43 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def) return def; } EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver); + +#define FW_SYSASSERT_CPU_MASK 0xf0000000 +static const struct { + const char *name; + u8 num; +} advanced_lookup[] = { + { "NMI_INTERRUPT_WDG", 0x34 }, + { "SYSASSERT", 0x35 }, + { "UCODE_VERSION_MISMATCH", 0x37 }, + { "BAD_COMMAND", 0x38 }, + { "BAD_COMMAND", 0x39 }, + { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, + { "FATAL_ERROR", 0x3D }, + { "NMI_TRM_HW_ERR", 0x46 }, + { "NMI_INTERRUPT_TRM", 0x4C }, + { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, + { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, + { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, + { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_LMAC_FATAL", 0x70 }, + { "NMI_INTERRUPT_UMAC_FATAL", 0x71 }, + { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 }, + { "NMI_INTERRUPT_ACTION_PT", 0x7C }, + { "NMI_INTERRUPT_UNKNOWN", 0x84 }, + { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, + { "ADVANCED_SYSASSERT", 0 }, +}; + +const char *iwl_fw_lookup_assert_desc(u32 num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) + if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK)) + return advanced_lookup[i].name; + + /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ + return advanced_lookup[i].name; +} +EXPORT_SYMBOL_GPL(iwl_fw_lookup_assert_desc); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index a8630bf90b63..07d72bcf9c46 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +30,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -313,7 +312,8 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type) return &fw->img[ucode_type]; } -u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd); +u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); +const char *iwl_fw_lookup_assert_desc(u32 num); #endif /* __iwl_fw_img_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index b373606e1241..f8516c7ca767 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -134,7 +134,8 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt) SOC_FLAGS_LTR_APPLY_DELAY_MASK); if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC) >= 2 && + SCAN_REQ_UMAC, + IWL_FW_CMD_VER_UNKNOWN) >= 2 && fwrt->trans->trans_cfg->low_latency_xtal) cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index b5e5e32b6152..cddcb4d9a264 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -207,7 +207,8 @@ struct iwl_fw_runtime { u8 sar_chain_b_profile; struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; u32 geo_rev; - struct iwl_ppag_table_cmd ppag_table; + union iwl_ppag_table_cmd ppag_table; + u32 ppag_ver; #endif }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index e27c13263a23..d03f51bf7dfd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -472,6 +472,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_QU 0x33 #define IWL_CFG_MAC_TYPE_QUZ 0x35 #define IWL_CFG_MAC_TYPE_QNJ 0x36 +#define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -479,6 +480,8 @@ struct iwl_cfg { #define IWL_CFG_RF_TYPE_JF1 0x108 #define IWL_CFG_RF_TYPE_HR2 0x10A #define IWL_CFG_RF_TYPE_HR1 0x10C +#define IWL_CFG_RF_TYPE_GF 0x10D +#define IWL_CFG_RF_TYPE_MR 0x110 #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 @@ -522,6 +525,7 @@ extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -543,7 +547,11 @@ extern const char iwl_ax201_name[]; extern const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; - +extern const char iwl_ax201_killer_1650s_name[]; +extern const char iwl_ax201_killer_1650i_name[]; +extern const char iwl_ma_name[]; +extern const char iwl_ax211_name[]; +extern const char iwl_ax411_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; @@ -641,6 +649,9 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0; extern const struct iwl_cfg 
iwlax211_cfg_snj_gf_a0; +extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0; +extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0; +extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 9ce7207d9ec5..51ce93d21ffe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -182,9 +182,13 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, alloc_id >= IWL_FW_INI_ALLOCATION_NUM) goto err; - if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH || - buf_location == IWL_FW_INI_LOCATION_NPK_PATH) && - alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) + if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH && + alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) + goto err; + + if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH && + alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 && + alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL) goto err; trans->dbg.fw_mon_cfg[alloc_id] = *alloc; @@ -233,6 +237,13 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, if (le32_to_cpu(tlv->length) < sizeof(*reg)) return -EINVAL; + /* To safely use a string coming from the FW, make sure it has a + * null terminator + */ + reg->name[IWL_FW_INI_MAX_NAME - 1] = 0; + + IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name); + if (id >= IWL_FW_INI_MAX_REGION_ID) { IWL_ERR(trans, "WRT: Invalid region id %u\n", id); return -EINVAL; @@ -947,9 +958,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, struct iwl_rx_packet *pkt = tp_data->fw_pkt; struct iwl_cmd_header *wanted_hdr = (void *)&trig_data; - if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) || - (pkt->hdr.cmd == wanted_hdr->cmd && - pkt->hdr.group_id == wanted_hdr->group_id))) { + if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd && + pkt->hdr.group_id == wanted_hdr->group_id)) { struct iwl_rx_packet *fw_pkt = kmemdup(pkt, sizeof(*pkt) + iwl_rx_packet_payload_len(pkt), @@ -1012,6 +1022,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest; int ret, i; + if (*ini_dest != IWL_FW_INI_LOCATION_INVALID) + return; + IWL_DEBUG_FW(fwrt, "WRT: Generating active triggers list, domain 0x%x\n", fwrt->trans->dbg.domains_bitmap); @@ -1076,6 +1089,7 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, break; case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: + case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index 063d8add147f..528eba441926 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright (C) 2018 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * Portions of this file are derived from the ipw3945 project.
* @@ -139,7 +139,7 @@ do { \ /* 0x00000F00 - 0x00000100 */ #define IWL_DL_POWER 0x00000100 #define IWL_DL_TEMP 0x00000200 -#define IWL_DL_RPM 0x00000400 +#define IWL_DL_WOWLAN 0x00000400 #define IWL_DL_SCAN 0x00000800 /* 0x0000F000 - 0x00001000 */ #define IWL_DL_ASSOC 0x00001000 @@ -205,7 +205,7 @@ do { \ #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) #define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a) -#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) +#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a) #define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) #define IWL_DEBUG_FW_INFO(p, f, a...) \ IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index ee410417761d..6d19de3058d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -254,6 +254,65 @@ enum iwl_reg_capa_flags { REG_CAPA_11AX_DISABLED = BIT(10), }; +/** + * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory + * domain (version 2). + * @REG_CAPA_V2_STRADDLE_DISABLED: Straddle channels (144, 142, 138) are + * disabled. + * @REG_CAPA_V2_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * 2.4GHz band is allowed. + * @REG_CAPA_V2_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * 5GHz band is allowed. + * @REG_CAPA_V2_160MHZ_ALLOWED: 11ac channel with a width of 160MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_V2_80MHZ_ALLOWED: 11ac channel with a width of 80MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_V2_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_V2_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_V2_WEATHER_DISABLED: Weather radar channels (120, 124, 128, 118, + * 126, 122) are disabled. + * @REG_CAPA_V2_40MHZ_ALLOWED: 11n channel with a width of 40MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @REG_CAPA_V2_11AX_DISABLED: 11ax is forbidden for this regulatory domain. + */ +enum iwl_reg_capa_flags_v2 { + REG_CAPA_V2_STRADDLE_DISABLED = BIT(0), + REG_CAPA_V2_BF_CCD_LOW_BAND = BIT(1), + REG_CAPA_V2_BF_CCD_HIGH_BAND = BIT(2), + REG_CAPA_V2_160MHZ_ALLOWED = BIT(3), + REG_CAPA_V2_80MHZ_ALLOWED = BIT(4), + REG_CAPA_V2_MCS_8_ALLOWED = BIT(5), + REG_CAPA_V2_MCS_9_ALLOWED = BIT(6), + REG_CAPA_V2_WEATHER_DISABLED = BIT(7), + REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), + REG_CAPA_V2_11AX_DISABLED = BIT(13), +}; + +/* + * API v2 for reg_capa_flags is relevant from version 6 and onwards of the + * MCC update command response. + */ +#define REG_CAPA_V2_RESP_VER 6 + +/** + * struct iwl_reg_capa - struct for global regulatory capabilities, used for + * handling the different APIs of reg_capa_flags. + * + * @allow_40mhz: 11n channel with a width of 40MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @allow_80mhz: 11ac channel with a width of 80MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @allow_160mhz: 11ac channel with a width of 160MHz is allowed + * for this regulatory domain (valid only in 5GHz). + * @disable_11ax: 11ax is forbidden for this regulatory domain.
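+ * + * Each field is used as a boolean: it holds the raw masked flag bit, so + * any non-zero value means the named capability (or restriction) applies.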
+ */ +struct iwl_reg_capa { + u16 allow_40mhz; + u16 allow_80mhz; + u16 allow_160mhz; + u16 disable_11ax; +}; + static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { @@ -1064,7 +1123,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, - u16 cap_flags, + struct iwl_reg_capa reg_capa, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1104,29 +1163,46 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, flags |= NL80211_RRF_GO_CONCURRENT; /* - * cap_flags is per regulatory domain so apply it for every channel + * reg_capa is per regulatory domain so apply it for every channel */ if (ch_idx >= NUM_2GHZ_CHANNELS) { - if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN) + if (!reg_capa.allow_40mhz) flags |= NL80211_RRF_NO_HT40; - if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED)) + if (!reg_capa.allow_80mhz) flags |= NL80211_RRF_NO_80MHZ; - if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED)) + if (!reg_capa.allow_160mhz) flags |= NL80211_RRF_NO_160MHZ; } - - if (cap_flags & REG_CAPA_11AX_DISABLED) + if (reg_capa.disable_11ax) flags |= NL80211_RRF_NO_HE; return flags; } +static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver) +{ + struct iwl_reg_capa reg_capa; + + if (resp_ver >= REG_CAPA_V2_RESP_VER) { + reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED; + reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED; + } else { + reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN); + reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED; + } + return reg_capa; +} + struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap) + u16 geo_info, u16 cap, u8 resp_ver) { int ch_idx; u16 ch_flags; @@ -1139,6 +1215,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int valid_rules = 0; bool new_rule; int max_num_ch; + struct iwl_reg_capa reg_capa; if (cfg->uhb_supported) { max_num_ch = IWL_NVM_NUM_CHANNELS_UHB; @@ -1169,6 +1246,9 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, regd->alpha2[0] = fw_mcc >> 8; regd->alpha2[1] = fw_mcc & 0xff; + /* parse regulatory capability flags */ + reg_capa = iwl_get_reg_capa(cap, resp_ver); + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = iwl_nl80211_band_from_channel_idx(ch_idx); @@ -1183,7 +1263,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cap, + ch_flags, reg_capa, cfg); /* we can't continue the same rule */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index fb0b385d10fd..50bd7fdcf852 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -104,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap); + u16 geo_info, u16 cap, u8 resp_ver); 
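As an illustration of the new resp_ver plumbing: iwl_get_reg_capa() folds two flag encodings into one neutral struct, since up to MCC update response version 5 the firmware reports a "40 MHz forbidden" bit (REG_CAPA_40MHZ_FORBIDDEN), while from version 6 (REG_CAPA_V2_RESP_VER) it reports a "40 MHz allowed" bit. A minimal userspace sketch of that version split follows; the flag values are copied from the v2 enum above, the v1 decoding is elided behind permissive defaults, and main() is only a demo harness, none of this is driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))
/* values copied from enum iwl_reg_capa_flags_v2 in this patch */
#define REG_CAPA_V2_160MHZ_ALLOWED BIT(3)
#define REG_CAPA_V2_80MHZ_ALLOWED  BIT(4)
#define REG_CAPA_V2_40MHZ_ALLOWED  BIT(8)
#define REG_CAPA_V2_11AX_DISABLED  BIT(13)
#define REG_CAPA_V2_RESP_VER 6

struct reg_capa {
	bool allow_40mhz, allow_80mhz, allow_160mhz, disable_11ax;
};

static struct reg_capa parse_reg_capa(uint16_t flags, uint8_t resp_ver)
{
	/* permissive defaults stand in for the elided v1 decoding */
	struct reg_capa rc = {
		.allow_40mhz = true,
		.allow_80mhz = true,
		.allow_160mhz = true,
	};

	if (resp_ver >= REG_CAPA_V2_RESP_VER) {
		/* v2: every capability is an "allowed" bit */
		rc.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED;
		rc.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED;
		rc.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED;
		rc.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED;
	}
	return rc;
}

int main(void)
{
	/* a v2 response that only allows 40 MHz channels */
	struct reg_capa rc = parse_reg_capa(REG_CAPA_V2_40MHZ_ALLOWED, 6);

	printf("40MHz:%d 80MHz:%d 160MHz:%d 11ax_disabled:%d\n",
	       rc.allow_40mhz, rc.allow_80mhz, rc.allow_160mhz,
	       rc.disable_11ax);
	return 0;
}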
/** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index f91197e4ae40..a26da96763dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -7,6 +7,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2019 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2019 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -60,17 +62,20 @@ #include <linux/kernel.h> #include <linux/bsearch.h> +#include "fw/api/tx.h" #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-fh.h" +#include "queue/tx.h" +#include <linux/dmapool.h> struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_trans_ops *ops, - unsigned int cmd_pool_size, - unsigned int cmd_pool_align) + const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; + int txcmd_size, txcmd_align; #ifdef CONFIG_LOCKDEP static struct lock_class_key __key; #endif @@ -79,6 +84,25 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, if (!trans) return NULL; + trans->trans_cfg = cfg_trans; + if (!cfg_trans->gen2) { + txcmd_size = sizeof(struct iwl_tx_cmd); + txcmd_align = sizeof(void *); + } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + txcmd_align = 64; + } else { + txcmd_size = sizeof(struct iwl_tx_cmd_gen3); + txcmd_align = 128; + } + + txcmd_size += sizeof(struct iwl_cmd_header); + txcmd_size += 36; /* biggest possible 802.11 header */ + + /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ + if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) + return ERR_PTR(-EINVAL); + #ifdef CONFIG_LOCKDEP lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", &__key, 0); @@ -88,22 +112,68 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, trans->ops = ops; trans->num_rx_queues = 1; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); + else + trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + /* + * For gen2 devices, we use a single allocation for each byte-count + * table, but they're pretty small (1k) so use a DMA pool that we + * allocate here. 
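+ * The pool is created with dmam_pool_create(), so it is devres-managed + * and torn down automatically when the device is unbound.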
+ */ + if (trans->trans_cfg->gen2) { + trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev, + trans->txqs.bc_tbl_size, + 256, 0); + if (!trans->txqs.bc_pool) + return NULL; + } + + if (trans->trans_cfg->use_tfh) { + trans->txqs.tfd.addr_size = 64; + trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); + } else { + trans->txqs.tfd.addr_size = 36; + trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; + trans->txqs.tfd.size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); + snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - cmd_pool_size, cmd_pool_align, + txcmd_size, txcmd_align, SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return NULL; WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); + trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); + if (!trans->txqs.tso_hdr_page) { + kmem_cache_destroy(trans->dev_cmd_pool); + return NULL; + } + return trans; } void iwl_trans_free(struct iwl_trans *trans) { + int i; + + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans->txqs.tso_hdr_page, i); + + if (p->page) + __free_page(p->page); + } + + free_percpu(trans->txqs.tso_hdr_page); + kmem_cache_destroy(trans->dev_cmd_pool); } @@ -130,7 +200,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (!(cmd->flags & CMD_ASYNC)) lock_map_acquire_read(&trans->sync_cmd_lockdep_map); - if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) + if (!iwl_cmd_groupid(cmd->id)) cmd->id = DEF_ID(cmd->id); ret = trans->ops->send_cmd(trans, cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 34788e7afc7b..45a9df0d42ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -215,6 +215,12 @@ struct iwl_device_tx_cmd { */ #define IWL_MAX_CMD_TBS_PER_TFD 2 +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. 
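+ * For example, with the TFH descriptor format (IWL_TFH_NUM_TBS == 25) + * this leaves room for 22 fragments, and with the legacy format + * (IWL_NUM_OF_TBS == 20) for 17.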
+ */ +#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3) + /** * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command * @@ -316,6 +322,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 #define IWL_MAX_RX_HW_QUEUES 16 +#define IWL_9000_MAX_RX_HW_QUEUES 6 /** * enum iwl_wowlan_status - WoWLAN image/device status @@ -906,19 +913,37 @@ struct iwl_txq { /** * struct iwl_trans_txqs - transport tx queues data * + * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) + * @page_offs: offset from skb->cb to mac header page pointer + * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer * @queue_used - bit mask of used queues * @queue_stopped - bit mask of stopped queues + * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler */ struct iwl_trans_txqs { unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + struct dma_pool *bc_pool; + size_t bc_tbl_size; + bool bc_table_dword; + u8 page_offs; + u8 dev_cmd_offs; + struct __percpu iwl_tso_hdr_page * tso_hdr_page; + struct { u8 fifo; u8 q_id; unsigned int wdg_timeout; } cmd; + struct { + u8 max_tbs; + u16 size; + u8 addr_size; + } tfd; + + struct iwl_dma_ptr scd_bc_tbls; }; /** @@ -939,7 +964,6 @@ struct iwl_trans_txqs { * @hw_id_str: a string with info about HW ID. Set during transport allocation. * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled - * @wide_cmd_header: true when ucode supports wide command header format * @num_rx_queues: number of RX queues allocated by the transport; * the transport must set this before calling iwl_drv_start() * @iml_len: the length of the image loader @@ -979,7 +1003,6 @@ struct iwl_trans { const struct iwl_hcmd_arr *command_groups; int command_groups_size; - bool wide_cmd_header; u8 num_rx_queues; @@ -1435,10 +1458,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) * transport helper functions *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, - struct device *dev, - const struct iwl_trans_ops *ops, - unsigned int cmd_pool_size, - unsigned int cmd_pool_align); + struct device *dev, + const struct iwl_trans_ops *ops, + const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index b0268f44b2ea..2487871eac73 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -152,12 +152,18 @@ #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true #define IWL_MVM_D3_DEBUG false -#define IWL_MVM_USE_TWT false +#define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 #define IWL_MVM_USE_NSSN_SYNC 0 #define IWL_MVM_PHY_FILTER_CHAIN_A 0 #define IWL_MVM_PHY_FILTER_CHAIN_B 0 #define IWL_MVM_PHY_FILTER_CHAIN_C 0 #define IWL_MVM_PHY_FILTER_CHAIN_D 0 +#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false +#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40 +/* 20016 pSec is 6 meter RTT, meaning 3 meter range */ +#define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016 +#define 
IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016 +#define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 2a94545d737f..e47c0be28656 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -70,6 +70,7 @@ #include "iwl-modparams.h" #include "fw-api.h" #include "mvm.h" +#include "fw/img.h" void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -80,8 +81,11 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN); - memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN); + mvmvif->rekey_data.kek_len = data->kek_len; + mvmvif->rekey_data.kck_len = data->kck_len; + memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len); + memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len); + mvmvif->rekey_data.akm = data->akm & 0xFF; mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; @@ -156,6 +160,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, struct wowlan_key_data { struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; struct iwl_wowlan_tkip_params_cmd *tkip; + struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd; bool error, use_rsc_tsc, use_tkip, configure_keys; int wep_key_idx; }; @@ -232,7 +237,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, default: data->error = true; return; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + return; case WLAN_CIPHER_SUITE_AES_CMAC: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have @@ -245,8 +255,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, if (sta) { u64 pn64; - tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; - tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; + tkip_sc = + data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = + &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; @@ -265,9 +277,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, rx_mic_key = data->tkip->mic_keys.rx_unicast; } else { tkip_sc = - data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc; + data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; rx_p1ks = data->tkip->rx_multi; rx_mic_key = data->tkip->mic_keys.rx_mcast; + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_TKIP); } /* @@ -299,16 +313,25 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, data->use_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: if (sta) { u64 pn64; - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; - aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; + aes_sc = + data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = + &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; pn64 = atomic64_read(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else { - aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; + aes_sc = + data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; + data->kek_kck_cmd->gtk_cipher = + key->cipher == WLAN_CIPHER_SUITE_CCMP ? 
+ cpu_to_le32(STA_KEY_FLG_CCM) : + cpu_to_le32(STA_KEY_FLG_GCMP); } /* @@ -354,6 +377,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, break; } + IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher); + if (data->configure_keys) { mutex_lock(&mvm->mutex); /* @@ -734,7 +759,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 cmd_flags) { - struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; + struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); @@ -743,9 +768,12 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, + .kek_kck_cmd = &kek_kck_cmd, }; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + u8 cmd_ver; + size_t cmd_size; key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); if (!key_data.rsc_tsc) @@ -772,10 +800,29 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, } if (key_data.use_rsc_tsc) { - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_TSC_RSC_PARAM, cmd_flags, - sizeof(*key_data.rsc_tsc), + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + WOWLAN_TSC_RSC_PARAM, + IWL_FW_CMD_VER_UNKNOWN); + int size; + + if (ver == 4) { + size = sizeof(*key_data.rsc_tsc); + key_data.rsc_tsc->sta_id = + cpu_to_le32(mvmvif->ap_sta_id); + + } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { + size = sizeof(key_data.rsc_tsc->params); + } else { + ret = 0; + WARN_ON_ONCE(1); + goto out; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + cmd_flags, + size, key_data.rsc_tsc); + if (ret) goto out; } @@ -783,9 +830,27 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, if (key_data.use_tkip && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + WOWLAN_TKIP_PARAM, + IWL_FW_CMD_VER_UNKNOWN); + int size; + + if (ver == 2) { + size = sizeof(tkip_cmd); + key_data.tkip->sta_id = + cpu_to_le32(mvmvif->ap_sta_id); + } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { + size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); + } else { + ret = -EINVAL; + WARN_ON_ONCE(1); + goto out; + } + + /* send relevant data according to CMD version */ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, - cmd_flags, sizeof(tkip_cmd), + cmd_flags, size, &tkip_cmd); if (ret) goto out; @@ -793,18 +858,34 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, /* configure rekey data only if offloaded rekey is supported (d3) */ if (mvmvif->rekey_data.valid) { + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + IWL_ALWAYS_LONG_GROUP, + WOWLAN_KEK_KCK_MATERIAL, + IWL_FW_CMD_VER_UNKNOWN); + if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && + cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) + return -EINVAL; + if (cmd_ver == 3) + cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3); + else + cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2); + memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd)); memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, - NL80211_KCK_LEN); - kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN); + mvmvif->rekey_data.kck_len); + kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len); memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, - NL80211_KEK_LEN); - kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); + mvmvif->rekey_data.kek_len); + 
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; + kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); + + IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n", + mvmvif->rekey_data.akm); ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, cmd_flags, - sizeof(kek_kck_cmd), + cmd_size, &kek_kck_cmd); if (ret) goto out; @@ -1331,6 +1412,8 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key); break; case WLAN_CIPHER_SUITE_TKIP: @@ -1367,6 +1450,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, /* ignore WEP completely, nothing to do */ return; case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_TKIP: /* we support these */ break; @@ -1392,6 +1477,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc, sta, key); atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); @@ -1460,6 +1547,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, >kdata); + IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", + le32_to_cpu(status->num_of_gtk_rekeys)); if (status->num_of_gtk_rekeys) { struct ieee80211_key_conf *key; struct { @@ -1472,13 +1561,26 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, }; __be64 replay_ctr; + IWL_DEBUG_WOWLAN(mvm, + "Received from FW GTK cipher %d, key index %d\n", + conf.conf.cipher, conf.conf.keyidx); switch (gtkdata.cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); conf.conf.keylen = WLAN_KEY_LEN_CCMP; memcpy(conf.conf.key, status->gtk[0].key, WLAN_KEY_LEN_CCMP); break; + case WLAN_CIPHER_SUITE_GCMP_256: + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + memcpy(conf.conf.key, status->gtk[0].key, + WLAN_KEY_LEN_GCMP_256); + break; case WLAN_CIPHER_SUITE_TKIP: + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); conf.conf.keylen = WLAN_KEY_LEN_TKIP; memcpy(conf.conf.key, status->gtk[0].key, 16); /* leave TX MIC key zeroed, we don't use it anyway */ @@ -1508,15 +1610,60 @@ out: return true; } +/* Occasionally, templates would be nice. This is one of those times ... 
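+ * (the macro below stamps out iwl_mvm_parse_wowlan_status_common_v6/_v7/_v9, + * each of which validates the packet length against the versioned struct + * and copies the version-independent fields into struct iwl_wowlan_status)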
*/ +#define iwl_mvm_parse_wowlan_status_common(_ver) \ +static struct iwl_wowlan_status * \ +iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ + void *_data, int len) \ +{ \ + struct iwl_wowlan_status *status; \ + struct iwl_wowlan_status_ ##_ver *data = _data; \ + int data_size; \ + \ + if (len < sizeof(*data)) { \ + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ + return ERR_PTR(-EIO); \ + } \ + \ + data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ + if (len != sizeof(*data) + data_size) { \ + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ + return ERR_PTR(-EIO); \ + } \ + \ + status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ + if (!status) \ + return ERR_PTR(-ENOMEM); \ + \ + /* copy all the common fields */ \ + status->replay_ctr = data->replay_ctr; \ + status->pattern_number = data->pattern_number; \ + status->non_qos_seq_ctr = data->non_qos_seq_ctr; \ + memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \ + sizeof(status->qos_seq_ctr)); \ + status->wakeup_reasons = data->wakeup_reasons; \ + status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \ + status->received_beacons = data->received_beacons; \ + status->wake_packet_length = data->wake_packet_length; \ + status->wake_packet_bufsize = data->wake_packet_bufsize; \ + memcpy(status->wake_packet, data->wake_packet, \ + le32_to_cpu(status->wake_packet_bufsize)); \ + \ + return status; \ +} + +iwl_mvm_parse_wowlan_status_common(v6) +iwl_mvm_parse_wowlan_status_common(v7) +iwl_mvm_parse_wowlan_status_common(v9) + struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) { - struct iwl_wowlan_status_v7 *v7; struct iwl_wowlan_status *status; struct iwl_host_cmd cmd = { .id = WOWLAN_GET_STATUSES, .flags = CMD_WANT_SKB, }; - int ret, len, status_size, data_size; + int ret, len; u8 notif_ver; lockdep_assert_held(&mvm->mutex); @@ -1528,28 +1675,19 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) } len = iwl_rx_packet_payload_len(cmd.resp_pkt); + + /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + WOWLAN_GET_STATUSES, 7); + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; - status_size = sizeof(*v6); - - if (len < status_size) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - status = ERR_PTR(-EIO); - goto out_free_resp; - } - - data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4); - - if (len != (status_size + data_size)) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - status = ERR_PTR(-EIO); - goto out_free_resp; - } - - status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); - if (!status) + status = iwl_mvm_parse_wowlan_status_common_v6(mvm, + cmd.resp_pkt->data, + len); + if (IS_ERR(status)) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > @@ -1574,47 +1712,37 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm) * currently used key. 
*/ status->gtk[0].key_flags = v6->gtk.key_index | BIT(7); + } else if (notif_ver == 7) { + struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; - status->replay_ctr = v6->replay_ctr; - - /* everything starting from pattern_number is identical */ - memcpy(&status->pattern_number, &v6->pattern_number, - offsetof(struct iwl_wowlan_status, wake_packet) - - offsetof(struct iwl_wowlan_status, pattern_number) + - data_size); - - goto out_free_resp; - } - - v7 = (void *)cmd.resp_pkt->data; - notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - WOWLAN_GET_STATUSES, 0); + status = iwl_mvm_parse_wowlan_status_common_v7(mvm, + cmd.resp_pkt->data, + len); + if (IS_ERR(status)) + goto out_free_resp; - status_size = sizeof(*status); + status->gtk[0] = v7->gtk[0]; + status->igtk[0] = v7->igtk[0]; + } else if (notif_ver == 9) { + struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; - if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9) - status_size = sizeof(*v7); + status = iwl_mvm_parse_wowlan_status_common_v9(mvm, + cmd.resp_pkt->data, + len); + if (IS_ERR(status)) + goto out_free_resp; - if (len < status_size) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); - status = ERR_PTR(-EIO); - goto out_free_resp; - } - data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4); + status->gtk[0] = v9->gtk[0]; + status->igtk[0] = v9->igtk[0]; - if (len != (status_size + data_size)) { - IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); + status->tid_tear_down = v9->tid_tear_down; + } else { + IWL_ERR(mvm, + "Firmware advertises unknown WoWLAN status response %d!\n", + notif_ver); status = ERR_PTR(-EIO); - goto out_free_resp; } - status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); - if (!status) - goto out_free_resp; - - memcpy(status, v7, status_size); - memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size); - out_free_resp: iwl_free_resp(&cmd); return status; @@ -1647,6 +1775,9 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, if (IS_ERR_OR_NULL(fw_status)) goto out_unlock; + IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", + le32_to_cpu(fw_status->wakeup_reasons)); + status.pattern_number = le16_to_cpu(fw_status->pattern_number); for (i = 0; i < 8; i++) status.qos_seq_ctr[i] = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 5ca45915cf7c..a0ce761d0c59 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -76,6 +76,103 @@ struct iwl_mvm_loc_entry { u8 buf[]; }; +struct iwl_mvm_smooth_entry { + struct list_head list; + u8 addr[ETH_ALEN]; + s64 rtt_avg; + u64 host_time; +}; + +struct iwl_mvm_ftm_pasn_entry { + struct list_head list; + u8 addr[ETH_ALEN]; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + u8 cipher; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; +}; + +int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u8 *addr, u32 cipher, u8 *tk, u32 tk_len, + u8 *hltk, u32 hltk_len) +{ + struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn), + GFP_KERNEL); + u32 expected_tk_len; + + lockdep_assert_held(&mvm->mutex); + + if (!pasn) + return -ENOBUFS; + + pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher); + + switch (pasn->cipher) { + case IWL_LOCATION_CIPHER_CCMP_128: + case IWL_LOCATION_CIPHER_GCMP_128: + expected_tk_len = WLAN_KEY_LEN_CCMP; + break; + case IWL_LOCATION_CIPHER_GCMP_256: + expected_tk_len = WLAN_KEY_LEN_GCMP_256; 
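+ /* the CCMP_128/GCMP_128 cases above share one length check because + * WLAN_KEY_LEN_CCMP == WLAN_KEY_LEN_GCMP == 16; GCMP-256 uses a + * 32 byte TK + */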
+ break; + default: + goto out; + } + + /* + * If associated to this AP and already have security context, + * the TK is already configured for this station, so it + * shouldn't be set again here. + */ + if (vif->bss_conf.assoc && + !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); + if (!IS_ERR_OR_NULL(sta) && sta->mfp) + expected_tk_len = 0; + rcu_read_unlock(); + } + + if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) { + IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", + tk_len, hltk_len); + goto out; + } + + memcpy(pasn->addr, addr, sizeof(pasn->addr)); + memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); + + if (tk && tk_len) + memcpy(pasn->tk, tk, sizeof(pasn->tk)); + + list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list); + return 0; +out: + kfree(pasn); + return -EINVAL; +} + +void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr) +{ + struct iwl_mvm_ftm_pasn_entry *entry, *prev; + + lockdep_assert_held(&mvm->mutex); + + list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list, + list) { + if (memcmp(entry->addr, addr, sizeof(entry->addr))) + continue; + + list_del(&entry->list); + kfree(entry); + return; + } +} + static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) { struct iwl_mvm_loc_entry *e, *t; @@ -84,6 +181,7 @@ static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) mvm->ftm_initiator.req_wdev = NULL; memset(mvm->ftm_initiator.responses, 0, sizeof(mvm->ftm_initiator.responses)); + list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { list_del(&e->list); kfree(e); @@ -120,6 +218,30 @@ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) iwl_mvm_ftm_reset(mvm); } +void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm) +{ + INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp); + + IWL_DEBUG_INFO(mvm, + "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n", + IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH, + IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA, + IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ, + IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT, + IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT); +} + +void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm) +{ + struct iwl_mvm_smooth_entry *se, *st; + + list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp, + list) { + list_del(&se->list); + kfree(se); + } +} + static int iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) { @@ -166,7 +288,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_tof_range_req_cmd *cmd, + struct iwl_tof_range_req_cmd_v9 *cmd, struct cfg80211_pmsr_request *req) { int i; @@ -335,7 +457,7 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, static void iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, - struct iwl_tof_range_req_ap_entry *target) + struct iwl_tof_range_req_ap_entry_v6 *target) { memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = @@ -411,7 +533,7 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, - struct iwl_tof_range_req_ap_entry *target) + struct iwl_tof_range_req_ap_entry_v6 *target) { int ret; @@ -421,7 +543,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, 
struct ieee80211_vif *vif, if (ret) return ret; - iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); + iwl_mvm_ftm_put_target_common(mvm, peer, target); if (vif->bss_conf.assoc && !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { @@ -539,7 +661,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { - struct iwl_tof_range_req_cmd cmd; + struct iwl_tof_range_req_cmd_v9 cmd; struct iwl_host_cmd hcmd = { .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), .dataflags[0] = IWL_HCMD_DFL_DUP, @@ -553,7 +675,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; - struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i]; + struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); if (err) @@ -563,6 +685,93 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } +static void iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_tof_range_req_ap_entry_v6 *target = data; + + if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) + return; + + WARN_ON(!sta->mfp); + + if (WARN_ON(key->keylen > sizeof(target->tk))) + return; + + memcpy(target->tk, key->key, key->keylen); + target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); + WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); +} + +static void +iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_tof_range_req_ap_entry_v7 *target) +{ + struct iwl_mvm_ftm_pasn_entry *entry; + u32 flags = le32_to_cpu(target->initiator_ap_flags); + + if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | + IWL_INITIATOR_AP_FLAGS_TB))) + return; + + lockdep_assert_held(&mvm->mutex); + + list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { + if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) + continue; + + target->cipher = entry->cipher; + memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); + + if (vif->bss_conf.assoc && + !memcmp(vif->bss_conf.bssid, target->bssid, + sizeof(target->bssid))) + ieee80211_iter_keys(mvm->hw, vif, iter, target); + else + memcpy(target->tk, entry->tk, sizeof(target->tk)); + + memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); + memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); + + target->initiator_ap_flags |= + cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); + return; + } +} + +static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v11 cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; + + err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); + if (err) + return err; + + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + } + + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + int 
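
iwl_mvm_ftm_start below picks the range-request builder from the TOF_RANGE_REQ_CMD version the firmware advertises. A compact sketch of that dispatch pattern, with hypothetical stub builders standing in for the iwl_mvm_ftm_start_v* functions (older command formats are elided):

    /* Hypothetical sketch of firmware-versioned command dispatch:
     * look up the advertised version once, route to the matching
     * builder, and refuse versions the driver does not know.
     */
    #include <errno.h>

    static int build_v11(void *ctx) { (void)ctx; return 0; }
    static int build_v9(void *ctx)  { (void)ctx; return 0; }

    static int ftm_start_dispatch(unsigned int cmd_ver, void *ctx)
    {
            switch (cmd_ver) {
            case 11:
                    return build_v11(ctx);
            case 9:
            case 10:                /* v10 shares the v9 layout */
                    return build_v9(ctx);
            default:
                    return -EOPNOTSUPP;
            }
    }
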
iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { @@ -577,9 +786,13 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (new_api) { u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RANGE_REQ_CMD); + TOF_RANGE_REQ_CMD, + IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 11: + err = iwl_mvm_ftm_start_v11(mvm, vif, req); + break; case 9: case 10: err = iwl_mvm_ftm_start_v9(mvm, vif, req); @@ -696,6 +909,95 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, return 0; } +static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, + struct cfg80211_pmsr_result *res) +{ + struct iwl_mvm_smooth_entry *resp; + s64 rtt_avg, rtt = res->ftm.rtt_avg; + u32 undershoot, overshoot; + u8 alpha; + bool found; + + if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) + return; + + WARN_ON(rtt < 0); + + if (res->status != NL80211_PMSR_STATUS_SUCCESS) { + IWL_DEBUG_INFO(mvm, + ": %pM: ignore failed measurement. Status=%u\n", + res->addr, res->status); + return; + } + + found = false; + list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) { + if (!memcmp(res->addr, resp->addr, ETH_ALEN)) { + found = true; + break; + } + } + + if (!found) { + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return; + + memcpy(resp->addr, res->addr, ETH_ALEN); + list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp); + + resp->rtt_avg = rtt; + + IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n", + resp->addr, resp->rtt_avg); + goto update_time; + } + + if (res->host_time - resp->host_time > + IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) { + resp->rtt_avg = rtt; + + IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n", + resp->addr, resp->rtt_avg); + goto update_time; + } + + /* Smooth the results based on the tracked RTT average */ + undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT; + overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; + alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; + + rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100; + + IWL_DEBUG_INFO(mvm, + "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", + resp->addr, resp->rtt_avg, rtt_avg, rtt); + + /* + * update the responder's average RTT results regardless of + * the under/over shoot logic below + */ + resp->rtt_avg = rtt_avg; + + /* smooth the results */ + if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) { + res->ftm.rtt_avg = rtt_avg; + + IWL_DEBUG_INFO(mvm, + "undershoot: val=%lld\n", + (rtt_avg - rtt)); + } else if (rtt_avg < rtt && (rtt - rtt_avg) > + overshoot) { + res->ftm.rtt_avg = rtt_avg; + IWL_DEBUG_INFO(mvm, + "overshoot: val=%lld\n", + (rtt - rtt_avg)); + } + +update_time: + resp->host_time = res->host_time; +} + static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { @@ -715,12 +1017,31 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); } +static void +iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, + struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) +{ + struct iwl_mvm_ftm_pasn_entry *entry; + + lockdep_assert_held(&mvm->mutex); + + list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { + if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr))) + continue; + + memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn)); + memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn)); + return; + } +} + void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct 
iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; - struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data; + struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; + struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data; int i; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); @@ -733,12 +1054,12 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) } if (new_api) { - if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id, - fw_resp->num_of_aps)) + if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id, + fw_resp_v8->num_of_aps)) return; - num_of_aps = fw_resp->num_of_aps; - last_in_batch = fw_resp->last_report; + num_of_aps = fw_resp_v8->num_of_aps; + last_in_batch = fw_resp_v8->last_report; } else { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, fw_resp_v5->num_of_aps)) @@ -754,17 +1075,21 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { struct cfg80211_pmsr_result result = {}; - struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap; + struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap; int peer_idx; if (new_api) { - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) - fw_ap = &fw_resp->ap[i]; - else + if (mvm->cmd_ver.range_resp == 8) { + fw_ap = &fw_resp_v8->ap[i]; + iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); + } else if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) { + fw_ap = (void *)&fw_resp_v7->ap[i]; + } else { fw_ap = (void *)&fw_resp_v6->ap[i]; + } - result.final = fw_resp->ap[i].last_burst; + result.final = fw_ap->last_burst; result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); result.ap_tsf_valid = 1; } else { @@ -830,6 +1155,8 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) iwl_mvm_ftm_get_lci_civic(mvm, &result); + iwl_mvm_ftm_rtt_smoothing(mvm, &result); + cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 0b6c32098b5a..c794612c41d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation + * Copyright (C) 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -62,6 +62,18 @@ #include "mvm.h" #include "constants.h" +struct iwl_mvm_pasn_sta { + struct list_head list; + struct iwl_mvm_int_sta int_sta; + u8 addr[ETH_ALEN]; +}; + +struct iwl_mvm_pasn_hltk_data { + u8 *addr; + u8 cipher; + u8 *hltk; +}; + static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef, u8 *bw, u8 *ctrl_ch_position) { @@ -137,7 +149,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, .sta_id = mvmvif->bcast_sta.sta_id, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RESPONDER_CONFIG_CMD); + TOF_RESPONDER_CONFIG_CMD, 6); int err; lockdep_assert_held(&mvm->mutex); @@ -162,11 +174,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, } static int -iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_ftm_responder_params *params) +iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_ftm_responder_params *params) { - struct iwl_tof_responder_dyn_config_cmd cmd = { + struct iwl_tof_responder_dyn_config_cmd_v2 cmd = { .lci_len = cpu_to_le32(params->lci_len + 2), .civic_len = cpu_to_le32(params->civicloc_len + 2), }; @@ -207,6 +219,173 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, return iwl_mvm_send_cmd(mvm, &hcmd); } +static int +iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_ftm_responder_params *params, + struct iwl_mvm_pasn_hltk_data *hltk_data) +{ + struct iwl_tof_responder_dyn_config_cmd cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD, + LOCATION_GROUP, 0), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + /* may not be able to DMA from stack */ + .dataflags[0] = IWL_HCMD_DFL_DUP, + }; + + lockdep_assert_held(&mvm->mutex); + + cmd.valid_flags = 0; + + if (params) { + if (params->lci_len + 2 > sizeof(cmd.lci_buf) || + params->civicloc_len + 2 > sizeof(cmd.civic_buf)) { + IWL_ERR(mvm, + "LCI/civic data too big (lci=%zd, civic=%zd)\n", + params->lci_len, params->civicloc_len); + return -ENOBUFS; + } + + cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT; + cmd.lci_buf[1] = params->lci_len; + memcpy(cmd.lci_buf + 2, params->lci, params->lci_len); + cmd.lci_len = params->lci_len + 2; + + cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT; + cmd.civic_buf[1] = params->civicloc_len; + memcpy(cmd.civic_buf + 2, params->civicloc, + params->civicloc_len); + cmd.civic_len = params->civicloc_len + 2; + + cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI | + IWL_RESPONDER_DYN_CFG_VALID_CIVIC; + } + + if (hltk_data) { + if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) { + IWL_ERR(mvm, "invalid cipher: %u\n", + hltk_data->cipher); + return -EINVAL; + } + + cmd.cipher = hltk_data->cipher; + memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr)); + memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf)); + cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA; + } + + return iwl_mvm_send_cmd(mvm, &hcmd); +} + +static int +iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_ftm_responder_params *params) +{ + int ret; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, + TOF_RESPONDER_DYN_CONFIG_CMD, 2); + + switch (cmd_ver) { + case 2: + ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif, + params); + break; + case 3: + ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, + params, NULL); + break; + default: + IWL_ERR(mvm, "Unsupported 
DYN_CONFIG_CMD version %u\n", + cmd_ver); + ret = -ENOTSUPP; + } + + return ret; +} + +static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_pasn_sta *sta) +{ + list_del(&sta->list); + iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); + iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta); + kfree(sta); +} + +int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 *addr, u32 cipher, u8 *tk, u32 tk_len, + u8 *hltk, u32 hltk_len) +{ + int ret; + struct iwl_mvm_pasn_sta *sta = NULL; + struct iwl_mvm_pasn_hltk_data hltk_data = { + .addr = addr, + .hltk = hltk, + }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, + TOF_RESPONDER_DYN_CONFIG_CMD, 2); + + lockdep_assert_held(&mvm->mutex); + + if (cmd_ver < 3) { + IWL_ERR(mvm, "Adding PASN station not supported by FW\n"); + return -ENOTSUPP; + } + + hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); + if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { + IWL_ERR(mvm, "invalid cipher: %u\n", cipher); + return -EINVAL; + } + + if (tk && tk_len) { + sta = kzalloc(sizeof(*sta), GFP_KERNEL); + if (!sta) + return -ENOBUFS; + + ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr, + cipher, tk, tk_len); + if (ret) { + kfree(sta); + return ret; + } + } + + ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data); + if (ret) { + if (sta) + iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); + return ret; + } + + memcpy(sta->addr, addr, ETH_ALEN); + list_add_tail(&sta->list, &mvm->resp_pasn_list); + return 0; +} + +int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, u8 *addr) +{ + struct iwl_mvm_pasn_sta *sta, *prev; + + lockdep_assert_held(&mvm->mutex); + + list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) { + if (!memcmp(sta->addr, addr, ETH_ALEN)) { + iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); + return 0; + } + } + + IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr); + return -EINVAL; +} + int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -255,12 +434,24 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } +void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_pasn_sta *sta, *prev; + + lockdep_assert_held(&mvm->mutex); + + list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) + iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); +} + void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (!vif->bss_conf.ftm_responder) return; + iwl_mvm_ftm_responder_clear(mvm, vif); iwl_mvm_ftm_start_responder(mvm, vif); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 95a613537047..4ea1032d8263 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -210,25 +210,36 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; - struct mvm_alive_resp_v3 *palive3; - struct mvm_alive_resp *palive; struct iwl_umac_alive *umac; struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; u16 status; - u32 lmac_error_event_table, umac_error_event_table; + u32 lmac_error_event_table, umac_error_table; + + /* we don't use the SKU ID from v5 yet, so 
handle it as v4 */ + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + UCODE_ALIVE_NTFY, 0) == 5 || + iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { + struct iwl_alive_ntf_v4 *palive; - if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); - } else { + } else if (iwl_rx_packet_payload_len(pkt) == + sizeof(struct iwl_alive_ntf_v3)) { + struct iwl_alive_ntf_v3 *palive3; + palive3 = (void *)pkt->data; umac = &palive3->umac_data; lmac1 = &palive3->lmac_data; status = le16_to_cpu(palive3->status); + } else { + WARN(1, "unsupported alive notification (size %d)\n", + iwl_rx_packet_payload_len(pkt)); + /* get timeout later */ + return false; } lmac_error_event_table = @@ -239,26 +250,22 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); - umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); + umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); - if (!umac_error_event_table) { - mvm->support_umac_log = false; - } else if (umac_error_event_table >= - mvm->trans->cfg->min_umac_error_event_table) { - mvm->support_umac_log = true; - } else { - IWL_ERR(mvm, - "Not valid error log pointer 0x%08X for %s uCode\n", - umac_error_event_table, - (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? - "Init" : "RT"); - mvm->support_umac_log = false; + if (umac_error_table) { + if (umac_error_table >= + mvm->trans->cfg->min_umac_error_event_table) { + iwl_fw_umac_set_alive_err_table(mvm->trans, + umac_error_table); + } else { + IWL_ERR(mvm, + "Not valid error log pointer 0x%08X for %s uCode\n", + umac_error_table, + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? 
+ "Init" : "RT"); + } } - if (mvm->support_umac_log) - iwl_fw_umac_set_alive_err_table(mvm->trans, - umac_error_event_table); - alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; @@ -310,7 +317,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; - static const u16 alive_cmd[] = { MVM_ALIVE }; + static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; bool run_in_rfkill = ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); @@ -590,7 +597,8 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) mvm->fw->default_calib[ucode_type].flow_trigger; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - PHY_CONFIGURATION_CMD); + PHY_CONFIGURATION_CMD, + IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 3) { iwl_mvm_phy_filter_init(mvm, &phy_filters); memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters, @@ -740,28 +748,42 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) #ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - union { - struct iwl_dev_tx_power_cmd v5; - struct iwl_dev_tx_power_cmd_v4 v4; - } cmd = { - .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + struct iwl_dev_tx_power_cmd cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; + __le16 *per_chain; int ret; u16 len = 0; - - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_REDUCE_TX_POWER)) + u32 n_subbands; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + REDUCE_TX_POWER_CMD, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver == 6) { + len = sizeof(cmd.v6); + n_subbands = IWL_NUM_SUB_BANDS_V2; + per_chain = cmd.v6.per_chain[0][0]; + } else if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { len = sizeof(cmd.v5); - else if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) - len = sizeof(struct iwl_dev_tx_power_cmd_v4); - else - len = sizeof(cmd.v4.v3); + n_subbands = IWL_NUM_SUB_BANDS; + per_chain = cmd.v5.per_chain[0][0]; + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { + len = sizeof(cmd.v4); + n_subbands = IWL_NUM_SUB_BANDS; + per_chain = cmd.v4.per_chain[0][0]; + } else { + len = sizeof(cmd.v3); + n_subbands = IWL_NUM_SUB_BANDS; + per_chain = cmd.v3.per_chain[0][0]; + } + /* all structs have the same common part, add it */ + len += sizeof(cmd.common); - ret = iwl_sar_select_profile(&mvm->fwrt, - cmd.v5.v3.per_chain_restriction, - prof_a, prof_b); + ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES, + n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ if (ret) @@ -773,21 +795,26 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { - union geo_tx_power_profiles_cmd geo_tx_cmd; + union iwl_geo_tx_power_profiles_cmd geo_tx_cmd; + struct iwl_geo_tx_power_profiles_resp *resp; u16 len; int ret; struct iwl_host_cmd cmd; - - if (fw_has_api(&mvm->fwrt.fw->ucode_capa, - IWL_UCODE_TLV_API_SAR_TABLE_VER)) { - geo_tx_cmd.geo_cmd.ops = - cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); - len = sizeof(geo_tx_cmd.geo_cmd); - } else { - geo_tx_cmd.geo_cmd_v1.ops = - cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); - len = sizeof(geo_tx_cmd.geo_cmd_v1); - } + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, + GEO_TX_POWER_LIMIT, + 
IWL_FW_CMD_VER_UNKNOWN); + + /* the ops field is at the same spot for all versions, so set in v1 */ + geo_tx_cmd.v1.ops = + cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); + + if (cmd_ver == 3) + len = sizeof(geo_tx_cmd.v3); + else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, + IWL_UCODE_TLV_API_SAR_TABLE_VER)) + len = sizeof(geo_tx_cmd.v2); + else + len = sizeof(geo_tx_cmd.v1); if (!iwl_sar_geo_support(&mvm->fwrt)) return -EOPNOTSUPP; @@ -804,21 +831,30 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); return ret; } - ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd); + + resp = (void *)cmd.resp_pkt->data; + ret = le32_to_cpu(resp->profile_idx); + + if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) + ret = -EIO; + iwl_free_resp(&cmd); return ret; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { - u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); - union geo_tx_power_profiles_cmd cmd; + union iwl_geo_tx_power_profiles_cmd cmd; u16 len; int ret; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, + GEO_TX_POWER_LIMIT, + IWL_FW_CMD_VER_UNKNOWN); - cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); + /* the table is also at the same position both in v1 and v2 */ + ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], + ACPI_WGDS_NUM_BANDS); - ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table); /* * It is a valid scenario to not support SAR, or miss wgds table, * but in that case there is no need to send the command. @@ -826,42 +862,75 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) if (ret) return 0; - cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + /* the ops field is at the same spot for all versions, so set in v1 */ + cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); - if (!fw_has_api(&mvm->fwrt.fw->ucode_capa, - IWL_UCODE_TLV_API_SAR_TABLE_VER)) { - len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1); + if (cmd_ver == 3) { + len = sizeof(cmd.v3); + cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, + IWL_UCODE_TLV_API_SAR_TABLE_VER)) { + len = sizeof(cmd.v2); + cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); } else { - len = sizeof(cmd.geo_cmd); + len = sizeof(cmd.v1); } - return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT), + 0, len, &cmd); } static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) { union acpi_object *wifi_pkg, *data, *enabled; - int i, j, ret, tbl_rev; + union iwl_ppag_table_cmd ppag_table; + int i, j, ret, tbl_rev, num_sub_bands; int idx = 2; + s8 *gain; - mvm->fwrt.ppag_table.enabled = cpu_to_le32(0); + /* + * The 'enabled' field is the same in v1 and v2 so we can just + * use v1 to access it. 
+ */ + mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0); data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* try to read ppag table revision 1 */ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, - ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); - - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { + ret = -EINVAL; + goto out_free; + } + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = mvm->fwrt.ppag_table.v2.gain[0]; + mvm->fwrt.ppag_ver = 2; + IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n"); + goto read_table; } - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; + /* try to read ppag table revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data, + ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = -EINVAL; + goto out_free; + } + num_sub_bands = IWL_NUM_SUB_BANDS; + gain = mvm->fwrt.ppag_table.v1.gain[0]; + mvm->fwrt.ppag_ver = 1; + IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; +read_table: enabled = &wifi_pkg->package.elements[1]; if (enabled->type != ACPI_TYPE_INTEGER || (enabled->integer.value != 0 && enabled->integer.value != 1)) { @@ -869,8 +938,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; } - mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value); - if (!mvm->fwrt.ppag_table.enabled) { + ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value); + if (!ppag_table.v1.enabled) { ret = 0; goto out_free; } @@ -880,8 +949,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the * following sub-bands to High-Band (5GHz). 
*/ - for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { - for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + for (j = 0; j < num_sub_bands; j++) { union acpi_object *ent; ent = &wifi_pkg->package.elements[idx++]; @@ -890,11 +959,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) || (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) || (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) { - mvm->fwrt.ppag_table.enabled = cpu_to_le32(0); + ppag_table.v1.enabled = cpu_to_le32(0); ret = -EINVAL; goto out_free; } - mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value; + gain[i * num_sub_bands + j] = ent->integer.value; } } ret = 0; @@ -905,34 +974,56 @@ out_free: int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { - int i, j, ret; + u8 cmd_ver; + int i, j, ret, num_sub_bands, cmd_size; + union iwl_ppag_table_cmd ppag_table; + s8 *gain; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { IWL_DEBUG_RADIO(mvm, "PPAG capability not supported by FW, command not sent.\n"); return 0; } - - if (!mvm->fwrt.ppag_table.enabled) { - IWL_DEBUG_RADIO(mvm, - "PPAG not enabled, command not sent.\n"); + if (!mvm->fwrt.ppag_table.v1.enabled) { + IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); return 0; } - IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver == 1) { + num_sub_bands = IWL_NUM_SUB_BANDS; + gain = mvm->fwrt.ppag_table.v1.gain[0]; + cmd_size = sizeof(ppag_table.v1); + if (mvm->fwrt.ppag_ver == 2) { + IWL_DEBUG_RADIO(mvm, + "PPAG table is v2 but FW supports v1, sending truncated table\n"); + } + } else if (cmd_ver == 2) { + num_sub_bands = IWL_NUM_SUB_BANDS_V2; + gain = mvm->fwrt.ppag_table.v2.gain[0]; + cmd_size = sizeof(ppag_table.v2); + if (mvm->fwrt.ppag_ver == 1) { + IWL_DEBUG_RADIO(mvm, + "PPAG table is v1 but FW supports v2, sending padded table\n"); + } + } else { + IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); + return 0; + } - for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { - for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + for (j = 0; j < num_sub_bands; j++) { IWL_DEBUG_RADIO(mvm, "PPAG table: chain[%d] band[%d]: gain = %d\n", - i, j, mvm->fwrt.ppag_table.gain[i][j]); + i, j, gain[i * num_sub_bands + j]); } } - + IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - 0, sizeof(mvm->fwrt.ppag_table), - &mvm->fwrt.ppag_table); + 0, cmd_size, &ppag_table); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); @@ -989,41 +1080,90 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } -static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) +static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) { int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, DSM_FUNC_ENABLE_INDONESIA_5G2); - IWL_DEBUG_RADIO(mvm, - "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n", - ret); + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n", + ret); + + else if (ret >= DSM_VALUE_INDONESIA_MAX) + IWL_DEBUG_RADIO(mvm, + "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n", + ret); + + else if (ret == DSM_VALUE_INDONESIA_ENABLE) { + 
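
The two DSM evaluators in this hunk share one defensive pattern: evaluate the ACPI method, treat a negative (error) result or an out-of-range value as "keep the default", and only honor an explicit, known value. That clamp reduces to a small pure function, sketched here with hypothetical names in place of the DSM_VALUE_* enums:

    /* Hypothetical sketch: clamp an untrusted platform-provided
     * value to a known policy, falling back to a safe default on
     * evaluation failure or out-of-range input.
     */
    enum policy { POLICY_DISABLE = 0, POLICY_ENABLE = 1, POLICY_MAX };

    static enum policy clamp_platform_value(int raw)
    {
            if (raw < 0)            /* ACPI evaluation failed */
                    return POLICY_DISABLE;
            if (raw >= POLICY_MAX)  /* value out of range */
                    return POLICY_DISABLE;
            return (enum policy)raw;
    }
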
IWL_DEBUG_RADIO(mvm, + "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n"); + return DSM_VALUE_INDONESIA_ENABLE; + } + /* default behaviour is disabled */ + return DSM_VALUE_INDONESIA_DISABLE; +} + +static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm) +{ + int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, + DSM_FUNC_DISABLE_SRD); + + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n", + ret); + + else if (ret >= DSM_VALUE_SRD_MAX) + IWL_DEBUG_RADIO(mvm, + "DSM function DISABLE_SRD return invalid value, ret=%d\n", + ret); + + else if (ret == DSM_VALUE_SRD_PASSIVE) { + IWL_DEBUG_RADIO(mvm, + "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n"); + return DSM_VALUE_SRD_PASSIVE; - return ret == 1; + } else if (ret == DSM_VALUE_SRD_DISABLE) { + IWL_DEBUG_RADIO(mvm, + "Evaluated DSM function DISABLE_SRD: disabling SRD\n"); + return DSM_VALUE_SRD_DISABLE; + } + /* default behaviour is active */ + return DSM_VALUE_SRD_ACTIVE; } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { - int ret; + u8 ret; + int cmd_ret; struct iwl_lari_config_change_cmd cmd = {}; - if (iwl_mvm_eval_dsm_indonesia_5g2(mvm)) + if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE) cmd.config_bitmap |= cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); + ret = iwl_mvm_eval_dsm_disable_srd(mvm); + if (ret == DSM_VALUE_SRD_PASSIVE) + cmd.config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); + + else if (ret == DSM_VALUE_SRD_DISABLE) + cmd.config_bitmap |= + cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); + /* apply more config masks here */ if (cmd.config_bitmap) { - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n", - le32_to_cpu(cmd.config_bitmap)); - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE), - 0, sizeof(cmd), &cmd); - if (ret < 0) + IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n"); + cmd_ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), + 0, sizeof(cmd), &cmd); + if (cmd_ret < 0) IWL_DEBUG_RADIO(mvm, "Failed to send LARI_CONFIG_CHANGE (%d)\n", - ret); + cmd_ret); } } #else /* CONFIG_ACPI */ @@ -1383,6 +1523,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); + iwl_mvm_ftm_initiator_smooth_config(mvm); + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 81bc05d70267..8c98def8b7f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -5,10 +5,9 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,10 +27,9 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -704,8 +702,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) + if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) { ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); + if (vif->bss_conf.twt_protected) + ctxt_sta->data_policy |= + cpu_to_le32(PROTECTED_TWT_SUPPORTED); + } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9374c85c5caf..5c9bde99ce19 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -234,6 +234,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcc_update_resp *resp; + u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); @@ -252,13 +253,16 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } + resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + MCC_UPDATE_CMD, 0); + IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), - __le16_to_cpu(resp->cap)); + __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -662,6 +666,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; + if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + WOWLAN_KEK_KCK_MATERIAL, + IWL_FW_CMD_VER_UNKNOWN) == 3) + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, @@ -753,6 +762,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_PROTECTED_TWT); + hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); @@ -1203,6 +1216,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); + iwl_mvm_ftm_initiator_smooth_stop(mvm); + /* firmware counters are obviously reset now, but we shouldn't * partially track so also clear the fw_reset_accu counters. 
*/ @@ -1300,27 +1315,32 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { int len; - union { - struct iwl_dev_tx_power_cmd v5; - struct iwl_dev_tx_power_cmd_v4 v4; - } cmd = { - .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v5.v3.mac_context_id = + struct iwl_dev_tx_power_cmd cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power), + .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + REDUCE_TX_POWER_CMD, + IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_REDUCE_TX_POWER)) + if (cmd_ver == 6) + len = sizeof(cmd.v6); + else if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v4); else - len = sizeof(cmd.v4.v3); + len = sizeof(cmd.v3); + + /* all structs have the same common part, add it */ + len += sizeof(cmd.common); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -2629,6 +2649,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_update_quotas(mvm, false, NULL); + iwl_mvm_ftm_responder_clear(mvm, vif); + /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may @@ -3428,15 +3450,16 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = -EOPNOTSUPP; - else - ret = 0; + break; + } if (key->cipher != WLAN_CIPHER_SUITE_GCMP && key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && !iwl_mvm_has_new_tx_api(mvm)) { key->hw_key_idx = STA_KEY_IDX_INVALID; + ret = 0; break; } @@ -3452,6 +3475,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) ret = -ENOSPC; + else + ret = 0; break; } @@ -3693,9 +3718,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, - "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", - channel->hw_value, req_dur, duration, delay, - dtim_interval); + "ROC: Requesting to remain on channel %u for %ums\n", + channel->hw_value, req_dur); + IWL_DEBUG_TE(mvm, + "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", + duration, delay, dtim_interval); + /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index e2f7f6ec711e..9187f8a1126d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -184,11 +184,6 @@ enum iwl_power_scheme { IWL_POWER_SCHEME_LP }; -union geo_tx_power_profiles_cmd { - struct iwl_geo_tx_power_profiles_cmd geo_cmd; - struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1; -}; - #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL @@ -421,7 
+416,11 @@ struct iwl_mvm_vif { #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { - u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN]; + u8 kck[NL80211_KCK_EXT_LEN]; + u8 kek[NL80211_KEK_EXT_LEN]; + size_t kek_len; + size_t kck_len; + u32 akm; __le64 replay_ctr; bool valid; } rekey_data; @@ -852,7 +851,6 @@ struct iwl_mvm { bool hw_registered; bool rfkill_safe_init_done; - bool support_umac_log; u32 ampdu_ref; bool ampdu_toggle; @@ -1113,10 +1111,17 @@ struct iwl_mvm { struct wireless_dev *req_wdev; struct list_head loc_list; int responses[IWL_MVM_TOF_MAX_APS]; + struct { + struct list_head resp; + } smooth; + struct list_head pasn_list; } ftm_initiator; + struct list_head resp_pasn_list; + struct { u8 d0i3_resp; + u8 range_resp; } cmd_ver; struct ieee80211_vif *nan_vif; @@ -1996,6 +2001,14 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, u8 *addr); +int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u8 *addr, u32 cipher, u8 *tk, u32 tk_len, + u8 *hltk, u32 hltk_len); +void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); /* FTM initiator */ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); @@ -2006,6 +2019,12 @@ void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); +void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm); +void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm); +int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u8 *addr, u32 cipher, u8 *tk, u32 tk_len, + u8 *hltk, u32 hltk_len); +void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr); /* TDLS */ @@ -2146,8 +2165,24 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw) { u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP, - SCAN_OFFLOAD_UPDATE_PROFILES_CMD); + SCAN_OFFLOAD_UPDATE_PROFILES_CMD, + IWL_FW_CMD_VER_UNKNOWN); return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ? 
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2; } + +static inline +enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_CCMP: + return IWL_LOCATION_CIPHER_CCMP_128; + case WLAN_CIPHER_SUITE_GCMP: + return IWL_LOCATION_CIPHER_GCMP_128; + case WLAN_CIPHER_SUITE_GCMP_256: + return IWL_LOCATION_CIPHER_GCMP_256; + default: + return IWL_LOCATION_CIPHER_INVALID; + } +} #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d095ff847be9..b86d62eff284 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -133,6 +133,7 @@ module_exit(iwl_mvm_exit); static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct iwl_trans_debug *dbg = &mvm->trans->dbg; u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val = 0; u32 phy_config = iwl_mvm_get_phy_config(mvm); @@ -169,7 +170,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; - if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) + if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) || + (iwl_trans_dbg_ini_valid(mvm->trans) && + dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location) + ) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, @@ -319,7 +323,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { - HCMD_NAME(MVM_ALIVE), + HCMD_NAME(UCODE_ALIVE_NTFY), HCMD_NAME(REPLY_ERROR), HCMD_NAME(ECHO_CMD), HCMD_NAME(INIT_COMPLETE_NOTIF), @@ -695,6 +699,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); + INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); + INIT_LIST_HEAD(&mvm->resp_pasn_list); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); @@ -724,6 +730,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1)) goto out_free; + mvm->cmd_ver.range_resp = + iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, + TOF_RANGE_RESPONSE_NOTIF, 5); + /* we only support up to version 8 */ + if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8)) + goto out_free; + /* * Populate the state variables that the transport layer needs * to know about. @@ -756,7 +769,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.rx_buf_size = rb_size_default; } - trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 0243dbe8ac49..a5da4106ba5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -7,8 +7,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -125,30 +125,19 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) */ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd *cmd, - u32 action, u32 apply_time) + u32 action) { - memset(cmd, 0, sizeof(struct iwl_phy_context_cmd)); - cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, ctxt->color)); cmd->action = cpu_to_le32(action); - cmd->apply_time = cpu_to_le32(apply_time); } -/* - * Add the phy configuration to the PHY context command - */ -static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, - struct iwl_phy_context_cmd *cmd, - struct cfg80211_chan_def *chandef, - u8 chains_static, u8 chains_dynamic) +static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm, + __le32 *rxchain_info, + u8 chains_static, + u8 chains_dynamic) { u8 active_cnt, idle_cnt; - struct iwl_phy_context_cmd_tail *tail = - iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci); - - /* Set the channel info data */ - iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); /* Set rx the chains */ idle_cnt = chains_static; @@ -166,20 +155,59 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, active_cnt = 2; } - tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << + *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); - tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); - tail->rxchain_info |= cpu_to_le32(active_cnt << + *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); + *rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mvm->dbgfs_rx_phyinfo)) - tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); + *rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); #endif +} + +/* + * Add the phy configuration to the PHY context command + */ +static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm, + struct iwl_phy_context_cmd_v1 *cmd, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + struct iwl_phy_context_cmd_tail *tail = + iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci); + + /* Set the channel info data */ + iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); + + iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info, + chains_static, chains_dynamic); tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } /* + * Add the phy configuration to the PHY context command + */ +static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, + struct iwl_phy_context_cmd *cmd, + struct cfg80211_chan_def *chandef, + u8 chains_static, u8 chains_dynamic) +{ + if (chandef->chan->band == NL80211_BAND_2GHZ || + !iwl_mvm_is_cdb_supported(mvm)) + cmd->lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX); + else + cmd->lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX); + + /* Set the channel info data */ + 
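
Just above, the v3 PHY context command routes each context to an LMAC explicitly: 2.4 GHz channels, or any channel on a device without CDB (a second LMAC), go to the 2.4 GHz index, and everything else to the 5 GHz one. The selection reduces to a small pure function, sketched here with hypothetical names in place of the NL80211/IWL_LMAC identifiers:

    /* Hypothetical sketch of the band-to-LMAC routing rule used by
     * the v3 PHY context command: non-CDB devices have a single
     * LMAC, so every band maps to the 2.4 GHz index there.
     */
    enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };
    enum lmac { LMAC_24G_INDEX = 0, LMAC_5G_INDEX = 1 };

    static enum lmac pick_lmac(enum band band, int cdb_supported)
    {
            if (band == BAND_2GHZ || !cdb_supported)
                    return LMAC_24G_INDEX;
            return LMAC_5G_INDEX;
    }
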
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); + + iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info, + chains_static, chains_dynamic); +} + +/* * Send a command to apply the current phy configuration. The command is send * only if something in the configuration changed: in case that this is the * first time that the phy configuration is applied or in case that the phy @@ -189,20 +217,46 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic, - u32 action, u32 apply_time) + u32 action) { - struct iwl_phy_context_cmd cmd; int ret; - u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); - - /* Set the command header fields */ - iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + PHY_CONTEXT_CMD, 1); + + if (ver == 3) { + struct iwl_phy_context_cmd cmd = {}; + + /* Set the command header fields */ + iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action); + + /* Set the command data */ + iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, + chains_static, + chains_dynamic); + + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, + 0, sizeof(cmd), &cmd); + } else if (ver < 3) { + struct iwl_phy_context_cmd_v1 cmd = {}; + u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); + + /* Set the command header fields */ + iwl_mvm_phy_ctxt_cmd_hdr(ctxt, + (struct iwl_phy_context_cmd *)&cmd, + action); + + /* Set the command data */ + iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef, + chains_static, + chains_dynamic); + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, + 0, len, &cmd); + } else { + IWL_ERR(mvm, "PHY ctxt cmd error ver %d not supported\n", ver); + return -EOPNOTSUPP; + } - /* Set the command data */ - iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, - chains_static, chains_dynamic); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd); if (ret) IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); return ret; @@ -223,7 +277,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, - FW_CTXT_ACTION_ADD, 0); + FW_CTXT_ACTION_ADD); } /* @@ -257,7 +311,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, /* ... 
remove it here ...*/ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, - FW_CTXT_ACTION_REMOVE, 0); + FW_CTXT_ACTION_REMOVE); if (ret) return ret; @@ -269,7 +323,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->width = chandef->width; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, - action, 0); + action); } void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 6f4d241d47e9..012123268ba9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -195,14 +195,20 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, { u16 supp; int i, highest_mcs; - u8 nss = sta->rx_nss; + u8 max_nss = sta->rx_nss; + struct ieee80211_vht_cap ieee_vht_cap = { + .vht_cap_info = cpu_to_le32(vht_cap->cap), + .supp_mcs = vht_cap->vht_mcs, + }; /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) - nss = 1; + max_nss = 1; - for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { - highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); + for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) { + int nss = i + 1; + + highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss); if (!highest_mcs) continue; @@ -211,7 +217,15 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp); - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + /* + * Check if VHT extended NSS indicates that the bandwidth/NSS + * configuration is supported - only for MCS 0 since we already + * decoded the MCS bits anyway ourselves. + */ + if (sta->bandwidth == IEEE80211_STA_RX_BW_160 && + ieee80211_get_vht_max_nss(&ieee_vht_cap, + IEEE80211_VHT_CHANWIDTH_160MHZ, + 0, true, nss) >= nss) cmd->ht_rates[i][IWL_TLC_HT_BW_160] = cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160]; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 86b2ebb5d5fb..ed7382e7ea17 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -830,6 +830,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm, return ucode_rate; } + /* set RTS protection for all non legacy rates + * This helps with congested environments reducing the conflict cost to + * RTS retries only, instead of the entire BA packet. + */ + ucode_rate |= RATE_MCS_RTS_REQUIRED_MSK; + if (is_ht(rate)) { if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) { IWL_ERR(mvm, "Invalid HT rate index %d\n", index); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c15f7dbc9516..ea29aeb86eef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation + * Copyright(c) 2018 - 2020 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -221,6 +221,31 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); + /* + * If we did CHECKSUM_COMPLETE, the hardware only does it right for + * certain cases and starts the checksum after the SNAP. Check if + * this is the case - it's easier to just bail out to CHECKSUM_NONE + * in the cases the hardware didn't handle, since it's rare to see + * such packets, even though the hardware did calculate the checksum + * in this case, just starting after the MAC header instead. + */ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + struct { + u8 hdr[6]; + __be16 type; + } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len); + + if (unlikely(headlen - hdrlen < sizeof(*shdr) || + !ether_addr_equal(shdr->hdr, rfc1042_header) || + (shdr->type != htons(ETH_P_IP) && + shdr->type != htons(ETH_P_ARP) && + shdr->type != htons(ETH_P_IPV6) && + shdr->type != htons(ETH_P_8021Q) && + shdr->type != htons(ETH_P_PAE) && + shdr->type != htons(ETH_P_TDLS)))) + skb->ip_summed = CHECKSUM_NONE; + } + fraglen = len - headlen; if (fraglen) { @@ -308,7 +333,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, int queue, u8 *crypt_len) { - u16 status = le16_to_cpu(desc->status); + u32 status = le32_to_cpu(desc->status); /* * Drop UNKNOWN frames in aggregation, unless in monitor mode @@ -393,22 +418,36 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, return 0; } -static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, +static void iwl_mvm_rx_csum(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, struct sk_buff *skb, - struct iwl_rx_mpdu_desc *desc) + struct iwl_rx_packet *pkt) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - u16 flags = le16_to_cpu(desc->l3l4_flags); - u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> - IWL_RX_L3_PROTO_POS); - - if (mvmvif->features & NETIF_F_RXCSUM && - flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && - (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK || - l3_prot == IWL_RX_L3_TYPE_IPV6 || - l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) - skb->ip_summed = CHECKSUM_UNNECESSARY; + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; + + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) { + u16 hwsum = be16_to_cpu(desc->v3.raw_xsum); + + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold(~(__force __sum16)hwsum); + } + } else { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif; + u16 flags = le16_to_cpu(desc->l3l4_flags); + u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> + IWL_RX_L3_PROTO_POS); + + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvmvif->features & NETIF_F_RXCSUM && + flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && + (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK || + l3_prot == IWL_RX_L3_TYPE_IPV6 || + l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } } /* @@ -1668,10 +1707,10 @@ void iwl_mvm_rx_mpdu_mq(struct 
iwl_mvm *mvm, struct napi_struct *napi, * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. */ - if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) || - !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { + if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) || + !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", - le16_to_cpu(desc->status)); + le32_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ @@ -1731,8 +1770,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); - if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { - u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; + if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { + u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); @@ -1796,7 +1835,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } if (ieee80211_is_data(hdr->frame_control)) - iwl_mvm_rx_csum(sta, skb, desc); + iwl_mvm_rx_csum(mvm, sta, skb, pkt); if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 51a061b138ba..1fbb52713493 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -147,7 +147,7 @@ struct iwl_mvm_scan_params { struct cfg80211_match_set *match_sets; int n_scan_plans; struct cfg80211_sched_scan_plan *scan_plans; - u32 measurement_dwell; + bool iter_notif; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -337,33 +337,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } -static int -iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, - struct cfg80211_scan_request *req, - struct iwl_mvm_scan_params *params) -{ - u32 duration = scan_timing[params->type].max_out_time; - - if (!req->duration) - return 0; - - if (iwl_mvm_is_cdb_supported(mvm)) { - u32 hb_time = scan_timing[params->hb_type].max_out_time; - - duration = min_t(u32, duration, hb_time); - } - - if (req->duration_mandatory && req->duration > duration) { - IWL_DEBUG_SCAN(mvm, - "Measurement scan - too long dwell %hu (max out time %u)\n", - req->duration, - duration); - return -EOPNOTSUPP; - } - - return min_t(u32, (u32)req->duration, duration); -} - static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ @@ -1333,10 +1306,8 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, u8 active_dwell, passive_dwell; timing = &scan_timing[params->type]; - active_dwell = params->measurement_dwell ? - params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; - passive_dwell = params->measurement_dwell ? - params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; + active_dwell = IWL_SCAN_DWELL_ACTIVE; + passive_dwell = IWL_SCAN_DWELL_PASSIVE; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { cmd->v7.adwell_default_n_aps_social = @@ -1389,8 +1360,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, } } } else { - cmd->v1.extended_dwell = params->measurement_dwell ? 
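[Note on the status-width change above: with desc->status widened to 32 bits, the station id is now extracted with le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID) from <linux/bitfield.h> instead of masking sta_id_flags. That helper is just "convert, mask, shift down by the mask's lowest set bit"; a hand-rolled equivalent for illustration:]

	/* Sketch: equivalent of le32_get_bits(v, mask) for a contiguous mask */
	static u32 le32_field_get(__le32 v, u32 mask)
	{
		u32 val = le32_to_cpu(v);

		/* mask & -mask isolates the lowest set bit, i.e. the field's LSB */
		return (val & mask) / (mask & -mask);
	}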
- params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED; + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->v1.active_dwell = active_dwell; cmd->v1.passive_dwell = passive_dwell; cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; @@ -1443,10 +1413,8 @@ iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm, u8 active_dwell, passive_dwell; timing = &scan_timing[params->type]; - active_dwell = params->measurement_dwell ? - params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; - passive_dwell = params->measurement_dwell ? - params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; + active_dwell = IWL_SCAN_DWELL_ACTIVE; + passive_dwell = IWL_SCAN_DWELL_PASSIVE; general_params->adwell_default_social_chn = IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; @@ -1737,7 +1705,7 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC; - if (params->measurement_dwell || + if (params->iter_notif || mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; @@ -1782,7 +1750,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; - if (params->measurement_dwell) + if (params->iter_notif) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -2229,7 +2197,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC); + SCAN_REQ_UMAC, + IWL_FW_CMD_VER_UNKNOWN); for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) { const struct iwl_scan_umac_handler *ver_handler = @@ -2293,11 +2262,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_fill_scan_type(mvm, ¶ms, vif); - ret = iwl_mvm_get_measurement_dwell(mvm, req, ¶ms); - if (ret < 0) - return ret; - - params.measurement_dwell = ret; + if (req->duration) + params.iter_notif = true; iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); @@ -2569,7 +2535,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size, tail_size; u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC); + SCAN_REQ_UMAC, + IWL_FW_CMD_VER_UNKNOWN); base_size = iwl_scan_req_umac_get_size(scan_ver); if (base_size) @@ -2626,6 +2593,15 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } + uid = iwl_mvm_scan_uid_by_status(mvm, + IWL_MVM_SCAN_STOPPING_REGULAR); + if (uid >= 0) + mvm->scan_uid_status[uid] = 0; + + uid = iwl_mvm_scan_uid_by_status(mvm, + IWL_MVM_SCAN_STOPPING_SCHED); + if (uid >= 0) + mvm->scan_uid_status[uid] = 0; /* We shouldn't have any UIDs still set. 
Loop over all the * UIDs to make sure there's nothing left there and warn if diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9e124755a3ce..72c9235c6bd5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -770,8 +770,6 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue); - return queue; } @@ -1997,7 +1995,7 @@ static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) } static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, - int maccolor, + int maccolor, u8 *addr, struct iwl_mvm_int_sta *sta, u16 *queue, int fifo) { @@ -2007,7 +2005,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); - ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor); + ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_disable_txq(mvm, NULL, *queue, @@ -2047,7 +2045,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) if (ret) return ret; - ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0, + ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0, NULL, &mvm->aux_sta, &mvm->aux_queue, IWL_MVM_TX_FIFO_MCAST); if (ret) { @@ -2065,7 +2063,8 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, - &mvm->snif_sta, &mvm->snif_queue, + NULL, &mvm->snif_sta, + &mvm->snif_queue, IWL_MVM_TX_FIFO_BE); } @@ -2863,7 +2862,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; } else { tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; - ret = 0; + ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; } out: @@ -3903,3 +3902,43 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } + +int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, + u8 *key, u32 key_len) +{ + int ret; + u16 queue; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_key_conf *keyconf; + + ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, + NL80211_IFTYPE_UNSPECIFIED, + IWL_STA_LINK); + if (ret) + return ret; + + ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, + addr, sta, &queue, + IWL_MVM_TX_FIFO_BE); + if (ret) + goto out; + + keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); + if (!keyconf) { + ret = -ENOBUFS; + goto out; + } + + keyconf->cipher = cipher; + memcpy(keyconf->key, key, key_len); + keyconf->keylen = key_len; + + ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, + 0, NULL, 0, 0, true); + kfree(keyconf); + return 0; +out: + iwl_mvm_dealloc_int_sta(mvm, sta); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index da2d1ac01229..55dd2fb0a779 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -579,5 +579,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, bool disable); void 
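[Note on the new iwl_mvm_add_pasn_sta() above: it is the usual allocate/attach/configure ladder with a goto unwind — allocate the internal station, add it with a queue, then upload the key, deallocating the station on early failure. Note that the success path returns a literal 0, so a failure from iwl_mvm_send_sta_key() is not propagated. The control-flow skeleton, with stubbed helpers (illustrative only, not the driver's code):]

	static int alloc_sta(void)          { return 0; }
	static int add_sta_with_queue(void) { return 0; }
	static int upload_key(void)         { return 0; }
	static void dealloc_sta(void)       { }

	static int add_pasn_station(void)
	{
		int ret;

		ret = alloc_sta();
		if (ret)
			return ret;		/* nothing allocated yet */

		ret = add_sta_with_queue();
		if (ret)
			goto out;		/* undo the allocation */

		ret = upload_key();		/* the patch above ignores this result */
		return ret;
	out:
		dealloc_sta();
		return ret;
	}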
iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); +int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, + u8 *key, u32 key_len); #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 0c95663bf9ed..94e9b6de425e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -228,24 +228,67 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) iwl_mvm_enter_ctkill(mvm); } -static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) +/* + * send the DTS_MEASUREMENT_TRIGGER command with or without waiting for a + * response. If we get a response then the measurement is stored in 'temp' + */ +static int iwl_mvm_send_temp_cmd(struct iwl_mvm *mvm, bool response, s32 *temp) { - struct iwl_dts_measurement_cmd cmd = { + struct iwl_host_cmd cmd = {}; + struct iwl_dts_measurement_cmd dts_cmd = { .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), }; - struct iwl_ext_dts_measurement_cmd extcmd = { + struct iwl_ext_dts_measurement_cmd ext_cmd = { .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE), }; - u32 cmdid; + struct iwl_dts_measurement_resp *resp; + void *cmd_ptr; + int ret; + u32 cmd_flags = 0; + u16 len; + + /* Check which command format is used (regular/extended) */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) { + len = sizeof(ext_cmd); + cmd_ptr = &ext_cmd; + } else { + len = sizeof(dts_cmd); + cmd_ptr = &dts_cmd; + } + /* The command version where we get a response is zero length */ + if (response) { + cmd_flags = CMD_WANT_SKB; + len = 0; + } - cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, - PHY_OPS_GROUP, 0); + cmd.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE); + cmd.len[0] = len; + cmd.flags = cmd_flags; + cmd.data[0] = cmd_ptr; - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) - return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd); + IWL_DEBUG_TEMP(mvm, + "Sending temperature measurement command - %s response\n", + response ? "with" : "without"); + ret = iwl_mvm_send_cmd(mvm, &cmd); - return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); + if (ret) { + IWL_ERR(mvm, + "Failed to send the temperature measurement command (err=%d)\n", + ret); + return ret; + } + + if (response) { + resp = (void *)cmd.resp_pkt->data; + *temp = le32_to_cpu(resp->temp); + IWL_DEBUG_TEMP(mvm, + "Got temperature measurement response: temp=%d\n", + *temp); + iwl_free_resp(&cmd); + } + + return ret; } int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) @@ -254,6 +297,18 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; int ret; + u8 cmd_ver; + + /* + * If command version is 1 we send the command and immediately get + * a response. For older versions we send the command and wait for a + * notification (no command TLV for previous versions). 
+ */ + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, + CMD_DTS_MEASUREMENT_TRIGGER_WIDE, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver == 1) + return iwl_mvm_send_temp_cmd(mvm, true, temp); lockdep_assert_held(&mvm->mutex); @@ -261,9 +316,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) temp_notif, ARRAY_SIZE(temp_notif), iwl_mvm_temp_notif_wait, temp); - ret = iwl_mvm_get_temp_cmd(mvm); + ret = iwl_mvm_send_temp_cmd(mvm, false, temp); if (ret) { - IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret); iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 2f6484e0d726..a372f32f4571 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1768,9 +1768,9 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, struct ieee80211_tx_info *ba_info, u32 rate) { struct sk_buff_head reclaimed_skbs; - struct iwl_mvm_tid_data *tid_data; + struct iwl_mvm_tid_data *tid_data = NULL; struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_sta *mvmsta = NULL; struct sk_buff *skb; int freed; @@ -1784,11 +1784,44 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* Reclaiming frames for a station that has been deleted ? */ - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + if (WARN_ON_ONCE(!sta)) { rcu_read_unlock(); return; } + __skb_queue_head_init(&reclaimed_skbs); + + /* + * Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway). + */ + iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); + + skb_queue_walk(&reclaimed_skbs, skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. + */ + info->flags |= IEEE80211_TX_STAT_ACK; + } + + /* + * It's possible to get a BA response after invalidating the rcu (rcu is + * invalidated in order to prevent new Tx from being sent, but there may + * be some frames already in-flight). + * In this case we just want to reclaim, and could skip all the + * sta-dependent stuff since it's in the middle of being removed + * anyways. + */ + if (IS_ERR(sta)) + goto out; + mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; @@ -1800,15 +1833,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, return; } - __skb_queue_head_init(&reclaimed_skbs); - - /* - * Release all TFDs before the SSN, i.e. all TFDs in front of - * block-ack window (we assume that they've been successfully - * transmitted ... if not, it's too late anyway). 
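[Note on the tt.c rework above: the dispatch keys off the command version — version-1 firmware answers the zero-length CMD_WANT_SKB trigger directly, older firmware emits a separate DTS notification that must be waited for. The hunk is truncated after registering the waiter; the pre-v1 path of iwl_mvm_get_temp() then reads roughly as below, per the iwlwifi notif-wait API (the timeout macro name follows the existing tt.c code and should be treated as an assumption here):]

	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
				   temp_notif, ARRAY_SIZE(temp_notif),
				   iwl_mvm_temp_notif_wait, temp);

	ret = iwl_mvm_send_temp_cmd(mvm, false, temp);
	if (ret) {
		iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
		return ret;
	}

	/* block until iwl_mvm_temp_notif_wait() fills *temp, or time out */
	ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
				    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);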
- */ - iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); - spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = index; @@ -1832,15 +1856,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, else WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); - iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); - - memset(&info->status, 0, sizeof(info->status)); - /* Packet was transmitted successfully, failures come as single - * frames because before failing a frame the firmware transmits - * it without aggregation at least once. - */ - info->flags |= IEEE80211_TX_STAT_ACK; - /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ if (freed == 1) { @@ -1917,8 +1932,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); - if (!mvmsta) - goto out_unlock; + /* + * It's possible to get a BA response after invalidating the rcu + * (rcu is invalidated in order to prevent new Tx from being + * sent, but there may be some frames already in-flight). + * In this case we just want to reclaim, and could skip all the + * sta-dependent stuff since it's in the middle of being removed + * anyways. + */ /* Free per TID */ for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) { @@ -1929,7 +1950,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; - mvmsta->tid_data[i].lq_color = lq_color; + if (mvmsta) + mvmsta->tid_data[i].lq_color = lq_color; + iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), @@ -1937,9 +1960,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) le32_to_cpu(ba_res->tx_rate)); } - iwl_mvm_tx_airtime(mvm, mvmsta, - le32_to_cpu(ba_res->wireless_time)); -out_unlock: + if (mvmsta) + iwl_mvm_tx_airtime(mvm, mvmsta, + le32_to_cpu(ba_res->wireless_time)); rcu_read_unlock(); out: IWL_DEBUG_TX_REPLY(mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index be57b8391850..ae39d81d74c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -67,6 +67,7 @@ #include "iwl-csr.h" #include "mvm.h" #include "fw/api/rs.h" +#include "fw/img.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless @@ -289,45 +290,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) return last_idx; } -#define FW_SYSASSERT_CPU_MASK 0xf0000000 -static const struct { - const char *name; - u8 num; -} advanced_lookup[] = { - { "NMI_INTERRUPT_WDG", 0x34 }, - { "SYSASSERT", 0x35 }, - { "UCODE_VERSION_MISMATCH", 0x37 }, - { "BAD_COMMAND", 0x38 }, - { "BAD_COMMAND", 0x39 }, - { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, - { "FATAL_ERROR", 0x3D }, - { "NMI_TRM_HW_ERR", 0x46 }, - { "NMI_INTERRUPT_TRM", 0x4C }, - { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, - { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, - { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, - { "NMI_INTERRUPT_HOST", 0x66 }, - { "NMI_INTERRUPT_LMAC_FATAL", 0x70 }, - { "NMI_INTERRUPT_UMAC_FATAL", 0x71 }, - { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 }, - { "NMI_INTERRUPT_ACTION_PT", 0x7C }, - { "NMI_INTERRUPT_UNKNOWN", 0x84 }, - { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, - { "ADVANCED_SYSASSERT", 0 }, -}; - -static const char *desc_lookup(u32 num) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) - if 
(advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK)) - return advanced_lookup[i].name; - - /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ - return advanced_lookup[i].name; -} - /* * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is @@ -463,7 +425,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) struct iwl_umac_error_event_table table; u32 base = mvm->trans->dbg.umac_error_event_table; - if (!mvm->support_umac_log && + if (!base && !(mvm->trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC)) return; @@ -480,7 +442,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) } IWL_ERR(mvm, "0x%08X | %s\n", table.error_id, - desc_lookup(table.error_id)); + iwl_fw_lookup_assert_desc(table.error_id)); IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); @@ -550,7 +512,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, - desc_lookup(table.error_id)); + iwl_fw_lookup_assert_desc(table.error_id)); IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 23abfbd096b0..2597faea79c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -73,7 +73,7 @@ static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, if (!result) return NULL; - if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) { + if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) { void *old = result; dma_addr_t oldphys = *phys; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e02bafb8921f..d7dfda909b93 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -540,20 +540,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)}, {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)}, + {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)}, + {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)}, + {IWL_PCI_DEVICE(0x2726, 0x0078, iwlax201_cfg_snj_hr_b0)}, + {IWL_PCI_DEVICE(0x2726, 0x007C, iwlax201_cfg_snj_hr_b0)}, {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)}, + {IWL_PCI_DEVICE(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0)}, {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)}, {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)}, + {IWL_PCI_DEVICE(0x2726, 0x2074, iwlax201_cfg_snj_hr_b0)}, + {IWL_PCI_DEVICE(0x2726, 0x4070, iwlax201_cfg_snj_hr_b0)}, {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)}, + {IWL_PCI_DEVICE(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long)}, {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)}, {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)}, {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)}, {IWL_PCI_DEVICE(0x7A70, 0x0A10, 
iwlax211_2ax_cfg_so_gf_a0_long)}, {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)}, + {IWL_PCI_DEVICE(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)}, {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)}, +/* Ma devices */ + {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, + {IWL_PCI_DEVICE(0x7E80, PCI_ANY_ID, iwl_ma_trans_cfg)}, + #endif /* CONFIG_IWLMVM */ {0} @@ -595,6 +608,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { /* QnJ with Hr */ IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), + /* SnJ with HR*/ + IWL_DEV_INFO(0x2726, 0x0244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name), + IWL_DEV_INFO(0x2726, 0x1651, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), + IWL_DEV_INFO(0x2726, 0x1652, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), + IWL_DEV_INFO(0x2726, 0x4244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name), + /* Qu with Hr */ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), @@ -955,6 +974,18 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_ANY, IWL_CFG_ANY, iwl_quz_a0_hr1_b0, iwl_ax101_name), +/* Ma */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_ma_a0_mr_a0, iwl_ma_name), + #endif /* CONFIG_IWLMVM */ }; @@ -987,9 +1018,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); - /* the trans_cfg should never change, so set it now */ - iwl_trans->trans_cfg = trans; - iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 55808ba10d27..033f845a89dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -79,12 +79,7 @@ #include "iwl-io.h" #include "iwl-op-mode.h" #include "iwl-drv.h" - -/* We need 2 entries for the TX command and header, and another one might - * be needed for potential data in the SKB's head. The remaining ones can - * be used for frags. 
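[Note on the Ma device entries above: they match on device id alone (PCI_ANY_ID subsystem) and bind the generic iwl_ma_trans_cfg, leaving the exact config to be refined by the MAC/RF-type matching in iwl_dev_info_table. For reference, a minimal wildcard entry in a standard pci_device_id table looks like this (example ids only):]

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_ids[] = {
		/* PCI_DEVICE() leaves subvendor/subdevice as PCI_ANY_ID */
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2729) },
		{ }	/* terminating sentinel */
	};
	MODULE_DEVICE_TABLE(pci, example_ids);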
- */ -#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3) +#include "queue/tx.h" /* * RX related structures and functions @@ -247,16 +242,6 @@ struct iwl_rb_allocator { }; /** - * iwl_queue_inc_wrap - increment queue index, wrap back to beginning - * @index -- current index - */ -static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) -{ - return ++index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -/** * iwl_get_closed_rb_stts - get closed rb stts from different structs * @rxq - the rxq to get the rb stts from */ @@ -274,28 +259,6 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, } } -/** - * iwl_queue_dec_wrap - decrement queue index, wrap back to end - * @index -- current index - */ -static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) -{ - return --index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -static inline dma_addr_t -iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx) -{ - return txq->first_tb_dma + - sizeof(struct iwl_pcie_first_tb_buf) * idx; -} - -struct iwl_tso_hdr_page { - struct page *page; - u8 *pos; -}; - #ifdef CONFIG_IWLWIFI_DEBUGFS /** * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data @@ -375,7 +338,6 @@ struct cont_rec { * count for allocating and freeing the memory. * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM - * @scd_bc_tbls: pointer to the byte count table of the scheduler * @kw: keep warm address * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support @@ -384,7 +346,6 @@ struct cont_rec { * @cmd_queue - command queue number * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @sw_csum_tx: if true, then the transport will compute the csum of the TXed * frame. @@ -434,8 +395,6 @@ struct iwl_trans_pcie { struct net_device napi_dev; - struct __percpu iwl_tso_hdr_page *tso_hdr_page; - /* INT ICT Table */ __le32 *ict_tbl; dma_addr_t ict_tbl_dma; @@ -449,9 +408,7 @@ struct iwl_trans_pcie { struct mutex mutex; u32 inta_mask; u32 scd_base_addr; - struct iwl_dma_ptr scd_bc_tbls; struct iwl_dma_ptr kw; - struct dma_pool *bc_pool; struct iwl_txq *txq_memory; @@ -465,17 +422,12 @@ struct iwl_trans_pcie { wait_queue_head_t wait_command_queue; wait_queue_head_t sx_waitq; - u8 page_offs, dev_cmd_offs; - u8 def_rx_queue; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; - u8 max_tbs; - u16 tfd_size; u16 num_rx_bufs; enum iwl_amsdu_size rx_buf_size; - bool bc_table_dword; bool scd_set_active; bool sw_csum_tx; bool pcie_dbg_dumped_once; @@ -579,19 +531,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ -/* - * We need this inline in case dma_addr_t is only 32-bits - since the - * hardware is always 64-bit, the issue can still occur in that case, - * so use u64 for 'phys' here to force the addition in 64-bit. 
- */ -static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len) -{ - return upper_32_bits(phys) != upper_32_bits(phys + len); -} - int iwl_pcie_tx_init(struct iwl_trans *trans); -int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, - int queue_size); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); @@ -602,14 +542,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); -void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, - struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); -void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, - struct iwl_txq *txq); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, @@ -617,22 +553,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); -static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, - u8 idx) -{ - if (trans->trans_cfg->use_tfh) { - struct iwl_tfh_tfd *tfd = _tfd; - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->tb_len); - } else { - struct iwl_tfd *tfd = _tfd; - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; - } -} - /***************************************************** * Error handling ******************************************************/ @@ -800,22 +720,6 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) } } -static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) -{ - return index & (q->n_window - 1); -} - -static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, int idx) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - - if (trans->trans_cfg->use_tfh) - idx = iwl_pcie_get_cmd_index(txq, idx); - - return txq->tfds + trans_pcie->tfd_size * idx; -} - static inline const char *queue_name(struct device *dev, struct iwl_trans_pcie *trans_p, int i) { @@ -867,37 +771,6 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); -static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); - iwl_op_mode_queue_not_full(trans->op_mode, txq->id); - } -} - -static inline void iwl_stop_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { - iwl_op_mode_queue_full(trans->op_mode, txq->id); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); - } else - IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", - txq->id); -} - -static inline bool iwl_queue_used(const struct iwl_txq *q, int i) -{ - int index = iwl_pcie_get_cmd_index(q, i); - int r = iwl_pcie_get_cmd_index(q, q->read_ptr); - int w = iwl_pcie_get_cmd_index(q, q->write_ptr); - - return 
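[Note on the internal.h deletions above: iwl_queue_inc_wrap(), iwl_queue_dec_wrap() and iwl_queue_used() are not dropped but move into the new common queue code pulled in via #include "queue/tx.h" — the trans.c hunks below already call the renamed iwl_txq_dec_wrap() and iwl_txq_get_cmd_index(). For a power-of-two queue size, the ring arithmetic reduces to:]

	/* Sketch of the ring math (size must be a power of two) */
	static inline int ring_inc_wrap(int index, int size)
	{
		return (index + 1) & (size - 1);
	}

	static inline int ring_dec_wrap(int index, int size)
	{
		return (index - 1) & (size - 1);
	}

	/* An entry is in use iff it lies in [read, write) modulo the wrap */
	static inline bool ring_entry_used(int i, int read, int write)
	{
		return write >= read ? (i >= read && i < write)
				     : !(i < read && i >= write);
	}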
w >= r ? - (index >= r && index < w) : - !(index < r && index >= w); -} - static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -964,23 +837,12 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); -int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, bool cmd_queue); -int iwl_pcie_txq_alloc(struct iwl_trans *trans, - struct iwl_txq *txq, int slots_num, bool cmd_queue); int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_apply_destination(struct iwl_trans *trans); -void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, - struct sk_buff *skb); -#ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb); -#endif /* common functions that are used by gen3 transport */ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); @@ -989,28 +851,10 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); -void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, - struct iwl_txq *txq); -int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, - struct iwl_txq **intxq, int size, - unsigned int timeout); -int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_host_cmd *hcmd); -int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, - __le16 flags, u8 sta_id, u8 tid, - int cmd_id, int size, - unsigned int timeout); -void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); -int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); -void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id); -void iwl_pcie_gen2_tx_free(struct iwl_trans *trans); -void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans); void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, bool test, bool reset); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 9463c108aa96..94299f259518 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1359,7 +1359,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, sequence = le16_to_cpu(pkt->hdr.sequence); index = SEQ_TO_INDEX(sequence); - cmd_index = iwl_pcie_get_cmd_index(txq, index); + cmd_index = iwl_txq_get_cmd_index(txq, index); if (rxq->id == trans_pcie->def_rx_queue) iwl_op_mode_rx(trans->op_mode, &rxq->napi, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 97c9e9c87436..91ec9379c061 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -162,7 +162,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_pcie_gen2_tx_stop(trans); + iwl_txq_gen2_tx_stop(trans); iwl_pcie_rx_stop(trans); } @@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size)) + if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index e5160d620868..4ee8b5dd2512 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1607,11 +1607,15 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; + u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; if (!cfg_trans->mq_rx_supported) goto enable_msi; - max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES); + if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) + max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; + + max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); for (i = 0; i < max_irqs; i++) trans_pcie->msix_entries[i].entry = i; @@ -1907,6 +1911,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; + trans->txqs.page_offs = trans_cfg->cb_data_offs; + trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else @@ -1924,13 +1931,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); - trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; + trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; - trans_pcie->page_offs = trans_cfg->cb_data_offs; - trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); - trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; @@ -1951,7 +1955,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); if (trans->trans_cfg->gen2) - iwl_pcie_gen2_tx_free(trans); + iwl_txq_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); @@ -1975,15 +1979,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_fw_monitor(trans); - for_each_possible_cpu(i) { - struct iwl_tso_hdr_page *p = - per_cpu_ptr(trans_pcie->tso_hdr_page, i); - - if (p->page) - __free_page(p->page); - } - - free_percpu(trans_pcie->tso_hdr_page); mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } @@ -2276,36 +2271,6 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) #define 
IWL_FLUSH_WAIT_MS 2000 -void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) -{ - u32 txq_id = txq->id; - u32 status; - bool active; - u8 fifo; - - if (trans->trans_cfg->use_tfh) { - IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, - txq->read_ptr, txq->write_ptr); - /* TODO: access new SCD registers and dump them */ - return; - } - - status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); - fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - - IWL_ERR(trans, - "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", - txq_id, active ? "" : "in", fifo, - jiffies_to_msecs(txq->wd_timeout), - txq->read_ptr, txq->write_ptr, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); -} - static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { @@ -2374,7 +2339,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) if (txq->read_ptr != txq->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues Q %d\n", txq_idx); - iwl_trans_pcie_log_scd_error(trans, txq); + iwl_txq_log_scd_error(trans, txq); return -ETIMEDOUT; } @@ -2985,12 +2950,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 cmdlen = 0; int i; - for (i = 0; i < trans_pcie->max_tbs; i++) - cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i); + for (i = 0; i < trans->txqs.tfd.max_tbs; i++) + cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); return cmdlen; } @@ -3329,14 +3293,14 @@ static struct iwl_trans_dump_data data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { - u16 tfd_size = trans_pcie->tfd_size; + u16 tfd_size = trans->txqs.tfd.size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { - u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); + u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); u8 tfdidx; u32 caplen, cmdlen; @@ -3359,7 +3323,7 @@ static struct iwl_trans_dump_data txcmd = (void *)((u8 *)txcmd->data + caplen); } - ptr = iwl_queue_dec_wrap(trans, ptr); + ptr = iwl_txq_dec_wrap(trans, ptr); } spin_unlock_bh(&cmdq->lock); @@ -3478,13 +3442,13 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .send_cmd = iwl_trans_pcie_gen2_send_hcmd, - .tx = iwl_trans_pcie_gen2_tx, + .tx = iwl_txq_gen2_tx, .reclaim = iwl_trans_pcie_reclaim, .set_q_ptrs = iwl_trans_pcie_set_q_ptrs, - .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, - .txq_free = iwl_trans_pcie_dyn_txq_free, + .txq_alloc = iwl_txq_dyn_alloc, + .txq_free = iwl_txq_dyn_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -3498,34 +3462,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - int ret, addr_size, txcmd_size, txcmd_align; + int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; - if (!cfg_trans->gen2) { + if (!cfg_trans->gen2) ops = &trans_ops_pcie; - 
txcmd_size = sizeof(struct iwl_tx_cmd); - txcmd_align = sizeof(void *); - } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { - txcmd_size = sizeof(struct iwl_tx_cmd_gen2); - txcmd_align = 64; - } else { - txcmd_size = sizeof(struct iwl_tx_cmd_gen3); - txcmd_align = 128; - } - - txcmd_size += sizeof(struct iwl_cmd_header); - txcmd_size += 36; /* biggest possible 802.11 header */ - - /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ - if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) - return ERR_PTR(-EINVAL); ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, - txcmd_size, txcmd_align); + cfg_trans); if (!trans) return ERR_PTR(-ENOMEM); @@ -3547,11 +3495,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); - trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); - if (!trans_pcie->tso_hdr_page) { - ret = -ENOMEM; - goto out_no_pci; - } trans_pcie->debug_rfkill = -1; if (!cfg_trans->base_params->pcie_l1_allowed) { @@ -3567,19 +3510,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->def_rx_queue = 0; - if (cfg_trans->use_tfh) { - addr_size = 64; - trans_pcie->max_tbs = IWL_TFH_NUM_TBS; - trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd); - } else { - addr_size = 36; - trans_pcie->max_tbs = IWL_NUM_OF_TBS; - trans_pcie->tfd_size = sizeof(struct iwl_tfd); - } - trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie); - pci_set_master(pdev); + addr_size = trans->txqs.tfd.addr_size; ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (!ret) ret = pci_set_consistent_dma_mask(pdev, @@ -3661,24 +3594,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->sx_waitq); - /* - * For gen2 devices, we use a single allocation for each byte-count - * table, but they're pretty small (1k) so use a DMA pool that we - * allocate here. - */ - if (cfg_trans->gen2) { - size_t bc_tbl_size; - - if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210) - bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); - else - bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); - - trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev, - bc_tbl_size, 256, 0); - if (!trans_pcie->bc_pool) - goto out_no_pci; - } if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); @@ -3712,7 +3627,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: - free_percpu(trans_pcie->tso_hdr_page); destroy_workqueue(trans_pcie->rba.alloc_wq); out_free_trans: iwl_trans_free(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 606bef2ecc7b..baa83a0b8593 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -58,751 +58,7 @@ #include "iwl-io.h" #include "internal.h" #include "fw/api/tx.h" - - /* - * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels - */ -void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) -{ - int txq_id; - - /* - * This function can be called before the op_mode disabled the - * queues. This happens when we have an rfkill interrupt. - * Since we stop Tx altogether - mark the queues as stopped. 
- */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { - if (!trans->txqs.txq[txq_id]) - continue; - iwl_pcie_gen2_txq_unmap(trans, txq_id); - } -} - -/* - * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - u8 filled_tfd_size, num_fetch_chunks; - u16 len = byte_cnt; - __le16 bc_ent; - - if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) - return; - - filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + - num_tbs * sizeof(struct iwl_tfh_tb); - /* - * filled_tfd_size contains the number of filled bytes in the TFD. - * Dividing it by 64 will give the number of chunks to fetch - * to SRAM- 0 for one chunk, 1 for 2 and so on. - * If, for example, TFD contains only 3 TBs then 32 bytes - * of the TFD are used, and only one chunk of 64 bytes should - * be fetched - */ - num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; - - /* Starting from AX210, the HW expects bytes */ - WARN_ON(trans_pcie->bc_table_dword); - WARN_ON(len > 0x3FFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); - scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; - } else { - struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - - /* Before AX210, the HW expects DW */ - WARN_ON(!trans_pcie->bc_table_dword); - len = DIV_ROUND_UP(len, 4); - WARN_ON(len > 0xFFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; - } -} - -/* - * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware - */ -void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); - - /* - * if not in power-save mode, uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). 
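[Note on iwl_pcie_gen2_update_byte_tbl() above (moving wholesale into the common queue code): each 16-bit byte-count entry packs two things — the frame length, and how many 64-byte SRAM chunks of the TFD the hardware must fetch. Per the driver's own comment, a 3-TB TFD occupies 2 + 3*10 = 32 bytes, hence a single chunk. A condensed sketch of the encoding:]

	/* Sketch: gen2 byte-count-table entry. TFD header is 2 bytes,
	 * each TB descriptor 10 bytes (see the "3 TBs -> 32 bytes"
	 * comment above).
	 */
	static __le16 bc_table_entry(u16 len, int num_tbs, bool hw_wants_bytes)
	{
		u16 tfd_bytes = 2 + num_tbs * 10;
		u8 num_fetch_chunks = DIV_ROUND_UP(tfd_bytes, 64) - 1;

		if (hw_wants_bytes)	/* AX210+: length in bytes, 14 bits */
			return cpu_to_le16(len | (num_fetch_chunks << 14));

		/* older gen2: length in dwords, 12 bits */
		return cpu_to_le16(DIV_ROUND_UP(len, 4) | (num_fetch_chunks << 12));
	}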
- */ - iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); -} - -static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd) -{ - return le16_to_cpu(tfd->num_tbs) & 0x1f; -} - -static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_tfh_tfd *tfd) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int i, num_tbs; - - /* Sanity check on number of chunks */ - num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); - - if (num_tbs > trans_pcie->max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - } - - tfd->num_tbs = 0; -} - -static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - - lockdep_assert_held(&txq->lock); - - iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans, txq, idx)); - - /* free SKB */ - if (txq->entries) { - struct sk_buff *skb; - - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } - } -} - -static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd, dma_addr_t addr, - u16 len) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb; - - /* - * Only WARN here so we know about the issue, but we mess up our - * unmap path because not every place currently checks for errors - * returned from this function - it can only return an error if - * there's no more space, and so when we know there is enough we - * don't always check ... 
- */ - WARN(iwl_pcie_crosses_4g_boundary(addr, len), - "possible DMA problem with iova:0x%llx, len:%d\n", - (unsigned long long)addr, len); - - if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) - return -EINVAL; - tb = &tfd->tbs[idx]; - - /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans_pcie->max_tbs); - return -EINVAL; - } - - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); - - tfd->num_tbs = cpu_to_le16(idx + 1); - - return idx; -} - -static struct page *get_workaround_page(struct iwl_trans *trans, - struct sk_buff *skb) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct page **page_ptr; - struct page *ret; - - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - - ret = alloc_page(GFP_ATOMIC); - if (!ret) - return NULL; - - /* set the chaining pointer to the previous page if there */ - *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; - *page_ptr = ret; - - return ret; -} - -/* - * Add a TB and if needed apply the FH HW bug workaround; - * meta != NULL indicates that it's a page mapping and we - * need to dma_unmap_page() and set the meta->tbs bit in - * this case. - */ -static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - dma_addr_t phys, void *virt, - u16 len, struct iwl_cmd_meta *meta) -{ - dma_addr_t oldphys = phys; - struct page *page; - int ret; - - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - - if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) { - ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); - - if (ret < 0) - goto unmap; - - if (meta) - meta->tbs |= BIT(ret); - - ret = 0; - goto trace; - } - - /* - * Work around a hardware bug. If (as expressed in the - * condition above) the TB ends on a 32-bit boundary, - * then the next TB may be accessed with the wrong - * address. - * To work around it, copy the data elsewhere and make - * a new mapping for it so the device will not fail. 
- */ - - if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { - ret = -ENOBUFS; - goto unmap; - } - - page = get_workaround_page(trans, skb); - if (!page) { - ret = -ENOMEM; - goto unmap; - } - - memcpy(page_address(page), virt, len); - - phys = dma_map_single(trans->dev, page_address(page), len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); - if (ret < 0) { - /* unmap the new allocation as single */ - oldphys = phys; - meta = NULL; - goto unmap; - } - IWL_WARN(trans, - "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", - len, (unsigned long long)oldphys, (unsigned long long)phys); - - ret = 0; -unmap: - if (meta) - dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); -trace: - trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); - - return ret; -} - -static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, - struct iwl_device_tx_cmd *dev_cmd) -{ -#ifdef CONFIG_INET - struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; - struct ieee80211_hdr *hdr = (void *)skb->data; - unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; - unsigned int mss = skb_shinfo(skb)->gso_size; - u16 length, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; - struct tso_t tso; - - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), - &dev_cmd->hdr, start_len, 0); - - ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); - snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); - total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; - amsdu_pad = 0; - - /* total amount of header we may need for this A-MSDU */ - hdr_room = DIV_ROUND_UP(total_len, mss) * - (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); - - /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) - return -ENOMEM; - - start_hdr = hdr_page->pos; - - /* - * Pull the ieee80211 header to be able to use TSO core, - * we will restore it for the tx_status flow. - */ - skb_pull(skb, hdr_len); - - /* - * Remove the length of all the headers that we don't actually - * have in the MPDU by themselves, but that we duplicate into - * all the different MSDUs inside the A-MSDU. - */ - le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); - - tso_start(skb, &tso); - - while (total_len) { - /* this is the data left for this subframe */ - unsigned int data_left = min_t(unsigned int, mss, total_len); - struct sk_buff *csum_skb = NULL; - unsigned int tb_len; - dma_addr_t tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; - - total_len -= data_left; - - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; - amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + - data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; - - length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); - - /* - * This will copy the SNAP as well which will be considered - * as MAC header. 
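[Note on the bounce-page fallback in iwl_pcie_gen2_set_tb_with_wa() above: it exists because of the hardware bug described in the comment — a TB ending exactly on a 32-bit address boundary can make the next TB fetch use the wrong upper address bits, so the data is copied to a fresh page and remapped. The trigger test is the same helper this series moves from internal.h into queue/tx.h, computed in u64 so it also works when dma_addr_t is 32 bits:]

	static inline bool crosses_4g_boundary(u64 phys, u16 len)
	{
		/* upper halves differ => [phys, phys + len) straddles 4 GiB */
		return upper_32_bits(phys) != upper_32_bits(phys + len);
	}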
- */ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); - - hdr_page->pos += snap_ip_tcp_hdrlen; - - tb_len = hdr_page->pos - start_hdr; - tb_phys = dma_map_single(trans->dev, start_hdr, - tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - dev_kfree_skb(csum_skb); - goto out_err; - } - /* - * No need for _with_wa, this is from the TSO page and - * we leave some space at the end of it so can't hit - * the buggy scenario. - */ - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, - tb_phys, tb_len); - /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); - - /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; - - /* put the payload */ - while (data_left) { - int ret; - - tb_len = min_t(unsigned int, tso.size, data_left); - tb_phys = dma_map_single(trans->dev, tso.data, - tb_len, DMA_TO_DEVICE); - ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, - tb_phys, tso.data, - tb_len, NULL); - if (ret) { - dev_kfree_skb(csum_skb); - goto out_err; - } - - data_left -= tb_len; - tso_build_data(skb, &tso, tb_len); - } - } - - /* re -add the WiFi header */ - skb_push(skb, hdr_len); - - return 0; - -out_err: -#endif - return -EINVAL; -} - -static struct -iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len) -{ - int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len; - void *tb1_addr; - - tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - /* do not align A-MSDU to dword as the subframe header aligns it */ - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); - - if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) - goto out_err; - - /* building the A-MSDU might have changed this data, memcpy it now */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - return tfd; - -out_err: - iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - struct iwl_cmd_meta *out_meta) -{ - int i; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr_t tb_phys; - unsigned int fragsz = skb_frag_size(frag); - int ret; - - if (!fragsz) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - fragsz, DMA_TO_DEVICE); - ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb_frag_address(frag), - fragsz, out_meta); - if (ret) - return ret; - } - - return 0; -} - -static struct -iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len, - bool pad) -{ - int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len, tb1_len, tb2_len; - void *tb1_addr; - struct sk_buff *frag; - - tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); - - /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - if (pad) - tb1_len = ALIGN(len, 4); - else - tb1_len = len; - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, - IWL_FIRST_TB_SIZE + tb1_len, hdr_len); - - /* set up TFD's third entry to point to remainder of skb's head */ - tb2_len = skb_headlen(skb) - hdr_len; - - if (tb2_len > 0) { - int ret; - - tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb->data + hdr_len, tb2_len, - NULL); - if (ret) - goto out_err; - } - - if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) - goto out_err; - - skb_walk_frags(skb, frag) { - int ret; - - tb_phys = dma_map_single(trans->dev, frag->data, - skb_headlen(frag), DMA_TO_DEVICE); - ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - frag->data, - skb_headlen(frag), NULL); - if (ret) - goto out_err; - if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) - goto out_err; - } - - return tfd; - -out_err: - iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static -struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); - int len, hdr_len; - bool amsdu; - - /* There must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); - - memset(tfd, 0, sizeof(*tfd)); - - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) - len = sizeof(struct iwl_tx_cmd_gen2); - else - len = sizeof(struct iwl_tx_cmd_gen3); - - amsdu = ieee80211_is_data_qos(hdr->frame_control) && - (*ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_A_MSDU_PRESENT); - - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - /* - * Only build A-MSDUs here if doing so by GSO, otherwise it may be - * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been - * built in the higher layers already. 
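/*
 * [Editor's sketch, not part of the patch] The dispatch below builds an
 * A-MSDU only for QoS data frames that carry the A-MSDU-present bit
 * *and* arrived via GSO. The constant mirrors mac80211's
 * IEEE80211_QOS_CTL_A_MSDU_PRESENT and is assumed to be 0x0080 here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QOS_CTL_A_MSDU_PRESENT	0x0080	/* assumed value */

static bool take_amsdu_path(uint16_t qos_ctl, unsigned int gso_size)
{
	/* pre-built A-MSDUs (gso_size == 0) take the normal TX path */
	return (qos_ctl & QOS_CTL_A_MSDU_PRESENT) && gso_size;
}

int main(void)
{
	printf("gso: %d, pre-built: %d\n",
	       take_amsdu_path(0x0080, 1448), take_amsdu_path(0x0080, 0));
	return 0;
}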
- */ - if (amsdu && skb_shinfo(skb)->gso_size) - return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, - out_meta, hdr_len, len); - - return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, - hdr_len, len, !amsdu); -} - -int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_cmd_meta *out_meta; - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - u16 cmd_len; - int idx; - void *tfd; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return -EINVAL; - - if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), - "TX on unused queue %d\n", txq_id)) - return -EINVAL; - - if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && - __skb_linearize(skb)) - return -ENOMEM; - - spin_lock(&txq->lock); - - if (iwl_queue_space(trans, txq) < txq->high_mark) { - iwl_stop_queue(trans, txq); - - /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_tx_cmd **dev_cmd_ptr; - - dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans_pcie->dev_cmd_offs); - - *dev_cmd_ptr = dev_cmd; - __skb_queue_tail(&txq->overflow_q, skb); - spin_unlock(&txq->lock); - return 0; - } - } - - idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - - /* Set up driver data for this TFD */ - txq->entries[idx].skb = skb; - txq->entries[idx].cmd = dev_cmd; - - dev_cmd->hdr.sequence = - cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(idx))); - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[idx].meta; - out_meta->flags = 0; - - tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); - if (!tfd) { - spin_unlock(&txq->lock); - return -1; - } - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen3->len); - } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen2->len); - } - - /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, - iwl_pcie_gen2_get_num_tbs(trans, tfd)); - - /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - - /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); - iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually. - */ - spin_unlock(&txq->lock); - return 0; -} +#include "queue/tx.h" /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -902,11 +158,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); - tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); - if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -984,8 +240,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); - iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), - tb0_size); + iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx), + tb0_size); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { @@ -995,11 +251,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { idx = -ENOMEM; - iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); goto out; } - iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, - copy_size - tb0_size); + iwl_txq_gen2_set_tb(trans, tfd, phys_addr, + copy_size - tb0_size); } /* map the remaining (adjusted) nocopy/dup fragments */ @@ -1017,10 +273,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { idx = -ENOMEM; - iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); goto out; } - iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); + iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]); } BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); @@ -1037,8 +293,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_lock_irqsave(&trans_pcie->reg_lock, flags); /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); - iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); + iwl_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: @@ -1169,322 +425,3 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, return iwl_pcie_gen2_send_hcmd_sync(trans, cmd); } -/* - * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's - */ -void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - while (txq->write_ptr != txq->read_ptr) { - IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", - txq_id, txq->read_ptr); - - if (txq_id != trans->txqs.cmd.q_id) { - int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb = txq->entries[idx].skb; - - if (WARN_ON_ONCE(!skb)) - continue; - - iwl_pcie_free_tso_page(trans_pcie, skb); - } - iwl_pcie_gen2_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); - } - - while (!skb_queue_empty(&txq->overflow_q)) { - struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); - - iwl_op_mode_free_skb(trans->op_mode, skb); - } - - spin_unlock_bh(&txq->lock); - - /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); -} - -void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct device *dev = trans->dev; - - /* De-alloc circular buffer of TFDs */ - if (txq->tfds) { - dma_free_coherent(dev, - trans_pcie->tfd_size * txq->n_window, - txq->tfds, txq->dma_addr); - dma_free_coherent(dev, - sizeof(*txq->first_tb_bufs) * txq->n_window, - 
txq->first_tb_bufs, txq->first_tb_dma); - } - - kfree(txq->entries); - if (txq->bc_tbl.addr) - dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr, - txq->bc_tbl.dma); - kfree(txq); -} - -/* - * iwl_pcie_txq_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) -{ - struct iwl_txq *txq; - int i; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return; - - txq = trans->txqs.txq[txq_id]; - - if (WARN_ON(!txq)) - return; - - iwl_pcie_gen2_txq_unmap(trans, txq_id); - - /* De-alloc array of command/tx buffers */ - if (txq_id == trans->txqs.cmd.q_id) - for (i = 0; i < txq->n_window; i++) { - kfree_sensitive(txq->entries[i].cmd); - kfree_sensitive(txq->entries[i].free_buf); - } - del_timer_sync(&txq->stuck_timer); - - iwl_pcie_gen2_txq_free_memory(trans, txq); - - trans->txqs.txq[txq_id] = NULL; - - clear_bit(txq_id, trans->txqs.queue_used); -} - -int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans, - struct iwl_txq **intxq, int size, - unsigned int timeout) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t bc_tbl_size, bc_tbl_entries; - struct iwl_txq *txq; - int ret; - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); - bc_tbl_entries = bc_tbl_size / sizeof(u16); - } else { - bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); - bc_tbl_entries = bc_tbl_size / sizeof(u16); - } - - if (WARN_ON(size > bc_tbl_entries)) - return -EINVAL; - - txq = kzalloc(sizeof(*txq), GFP_KERNEL); - if (!txq) - return -ENOMEM; - - txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL, - &txq->bc_tbl.dma); - if (!txq->bc_tbl.addr) { - IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); - kfree(txq); - return -ENOMEM; - } - - ret = iwl_pcie_txq_alloc(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue alloc failed\n"); - goto error; - } - ret = iwl_pcie_txq_init(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue init failed\n"); - goto error; - } - - txq->wd_timeout = msecs_to_jiffies(timeout); - - *intxq = txq; - return 0; - -error: - iwl_pcie_gen2_txq_free_memory(trans, txq); - return ret; -} - -int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_host_cmd *hcmd) -{ - struct iwl_tx_queue_cfg_rsp *rsp; - int ret, qid; - u32 wr_ptr; - - if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != - sizeof(*rsp))) { - ret = -EINVAL; - goto error_free_resp; - } - - rsp = (void *)hcmd->resp_pkt->data; - qid = le16_to_cpu(rsp->queue_number); - wr_ptr = le16_to_cpu(rsp->write_pointer); - - if (qid >= ARRAY_SIZE(trans->txqs.txq)) { - WARN_ONCE(1, "queue index %d unsupported", qid); - ret = -EIO; - goto error_free_resp; - } - - if (test_and_set_bit(qid, trans->txqs.queue_used)) { - WARN_ONCE(1, "queue %d already used", qid); - ret = -EIO; - goto error_free_resp; - } - - txq->id = qid; - trans->txqs.txq[qid] = txq; - wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - /* Place first TFD at index corresponding to start sequence number */ - txq->read_ptr = wr_ptr; - txq->write_ptr = wr_ptr; - - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); - - iwl_free_resp(hcmd); - return qid; - -error_free_resp: - iwl_free_resp(hcmd); - 
iwl_pcie_gen2_txq_free_memory(trans, txq); - return ret; -} - -int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, - __le16 flags, u8 sta_id, u8 tid, - int cmd_id, int size, - unsigned int timeout) -{ - struct iwl_txq *txq = NULL; - struct iwl_tx_queue_cfg_cmd cmd = { - .flags = flags, - .sta_id = sta_id, - .tid = tid, - }; - struct iwl_host_cmd hcmd = { - .id = cmd_id, - .len = { sizeof(cmd) }, - .data = { &cmd, }, - .flags = CMD_WANT_SKB, - }; - int ret; - - ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout); - if (ret) - return ret; - - cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); - cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); - - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - goto error; - - return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd); - -error: - iwl_pcie_gen2_txq_free_memory(trans, txq); - return ret; -} - -void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue) -{ - if (WARN(queue >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", queue)) - return; - - /* - * Upon HW Rfkill - we stop the device, and then stop the queues - * in the op_mode. Just for the sake of the simplicity of the op_mode, - * allow the op_mode to call txq_disable after it already called - * stop_device. - */ - if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { - WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), - "queue %d not used", queue); - return; - } - - iwl_pcie_gen2_txq_unmap(trans, queue); - - iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]); - trans->txqs.txq[queue] = NULL; - - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); -} - -void iwl_pcie_gen2_tx_free(struct iwl_trans *trans) -{ - int i; - - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Free all TX queues */ - for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { - if (!trans->txqs.txq[i]) - continue; - - iwl_pcie_gen2_txq_free(trans, i); - } -} - -int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size) -{ - struct iwl_txq *queue; - int ret; - - /* alloc and init the tx queue */ - if (!trans->txqs.txq[txq_id]) { - queue = kzalloc(sizeof(*queue), GFP_KERNEL); - if (!queue) { - IWL_ERR(trans, "Not enough memory for tx queue\n"); - return -ENOMEM; - } - trans->txqs.txq[txq_id] = queue; - ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); - if (ret) { - IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); - goto error; - } - } else { - queue = trans->txqs.txq[txq_id]; - } - - ret = iwl_pcie_txq_init(trans, queue, queue_size, - (txq_id == trans->txqs.cmd.q_id)); - if (ret) { - IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); - goto error; - } - trans->txqs.txq[txq_id]->id = txq_id; - set_bit(txq_id, trans->txqs.queue_used); - - return 0; - -error: - iwl_pcie_gen2_tx_free(trans); - return ret; -} - diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index eb396c06b7fb..8c89e4a2f7f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -77,9 +77,6 @@ #include "internal.h" #include "fw/api/tx.h" -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 - /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * @@ -102,60 +99,6 @@ * ***************************************************/ -int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) -{ - unsigned int max; - unsigned int used; - - /* - * To avoid 
ambiguity between empty and completely full queues, there - * should always be less than max_tfd_queue_size elements in the queue. - * If q->n_window is smaller than max_tfd_queue_size, there is no need - * to reserve any queue entries for this purpose. - */ - if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) - max = q->n_window; - else - max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; - - /* - * max_tfd_queue_size is a power of 2, so the following is equivalent to - * modulo by max_tfd_queue_size and is well defined. - */ - used = (q->write_ptr - q->read_ptr) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - if (WARN_ON(used > max)) - return 0; - - return max - used; -} - -/* - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num) -{ - q->n_window = slots_num; - - /* slots_num must be power-of-two size, otherwise - * iwl_pcie_get_cmd_index is broken. */ - if (WARN_ON(!is_power_of_2(slots_num))) - return -EINVAL; - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = 0; - q->read_ptr = 0; - - return 0; -} int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size) @@ -180,99 +123,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) memset(ptr, 0, sizeof(*ptr)); } -static void iwl_pcie_txq_stuck_timer(struct timer_list *t) -{ - struct iwl_txq *txq = from_timer(txq, t, stuck_timer); - struct iwl_trans *trans = txq->trans; - - spin_lock(&txq->lock); - /* check if triggered erroneously */ - if (txq->read_ptr == txq->write_ptr) { - spin_unlock(&txq->lock); - return; - } - spin_unlock(&txq->lock); - - iwl_trans_pcie_log_scd_error(trans, txq); - - iwl_force_nmi(trans); -} - -/* - * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int write_ptr = txq->write_ptr; - int txq_id = txq->id; - u8 sec_ctl = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - u8 sta_id = tx_cmd->sta_id; - - scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - - sec_ctl = tx_cmd->sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += IEEE80211_CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += IEEE80211_TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; - break; - } - if (trans_pcie->bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) - return; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. 
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; -} - -static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - int txq_id = txq->id; - int read_ptr = txq->read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != trans->txqs.cmd.q_id) - sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; -} - /* * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware */ @@ -339,35 +189,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) } } -static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, - void *_tfd, u8 idx) -{ - - if (trans->trans_cfg->use_tfh) { - struct iwl_tfh_tfd *tfd = _tfd; - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; - - return (dma_addr_t)(le64_to_cpu(tb->addr)); - } else { - struct iwl_tfd *tfd = _tfd; - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - dma_addr_t addr = get_unaligned_le32(&tb->lo); - dma_addr_t hi_len; - - if (sizeof(dma_addr_t) <= sizeof(u32)) - return addr; - - hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; - - /* - * shift by 16 twice to avoid warnings on 32-bit - * (where this code never runs anyway due to the - * if statement above) - */ - return addr | ((hi_len << 16) << 16); - } -} - static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, u8 idx, dma_addr_t addr, u16 len) { @@ -384,67 +205,6 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, tfd_fh->num_tbs = idx + 1; } -static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) -{ - if (trans->trans_cfg->use_tfh) { - struct iwl_tfh_tfd *tfd = _tfd; - - return le16_to_cpu(tfd->num_tbs) & 0x1f; - } else { - struct iwl_tfd *tfd = _tfd; - - return tfd->num_tbs & 0x1f; - } -} - -static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int i, num_tbs; - void *tfd = iwl_pcie_get_tfd(trans, txq, index); - - /* Sanity check on number of chunks */ - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); - - if (num_tbs > trans_pcie->max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - iwl_pcie_tfd_tb_get_addr(trans, tfd, i), - iwl_pcie_tfd_tb_get_len(trans, tfd, i), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - iwl_pcie_tfd_tb_get_addr(trans, tfd, - i), - iwl_pcie_tfd_tb_get_len(trans, tfd, - i), - DMA_TO_DEVICE); - } - - meta->tbs = 0; - - if (trans->trans_cfg->use_tfh) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } - -} - /* * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans - transport private data @@ -460,14 +220,14 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, 
struct iwl_txq *txq) * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; - int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); + int idx = iwl_txq_get_cmd_index(txq, rd_ptr); lockdep_assert_held(&txq->lock); /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ if (txq->entries) { @@ -489,21 +249,20 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *tfd; u32 num_tbs; - tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; + tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr; if (reset) - memset(tfd, 0, trans_pcie->tfd_size); + memset(tfd, 0, trans->txqs.tfd.size); - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (num_tbs >= trans_pcie->max_tbs) { + if (num_tbs >= trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans_pcie->max_tbs); + trans->txqs.tfd.max_tbs); return -EINVAL; } @@ -516,126 +275,6 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, return num_tbs; } -int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, bool cmd_queue) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t tfd_sz = trans_pcie->tfd_size * - trans->trans_cfg->base_params->max_tfd_queue_size; - size_t tb0_buf_sz; - int i; - - if (WARN_ON(txq->entries || txq->tfds)) - return -EINVAL; - - if (trans->trans_cfg->use_tfh) - tfd_sz = trans_pcie->tfd_size * slots_num; - - timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); - txq->trans = trans; - - txq->n_window = slots_num; - - txq->entries = kcalloc(slots_num, - sizeof(struct iwl_pcie_txq_entry), - GFP_KERNEL); - - if (!txq->entries) - goto error; - - if (cmd_queue) - for (i = 0; i < slots_num; i++) { - txq->entries[i].cmd = - kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->entries[i].cmd) - goto error; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, - &txq->dma_addr, GFP_KERNEL); - if (!txq->tfds) - goto error; - - BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); - - tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; - - txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, - &txq->first_tb_dma, - GFP_KERNEL); - if (!txq->first_tb_bufs) - goto err_free_tfds; - - return 0; -err_free_tfds: - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); -error: - if (txq->entries && cmd_queue) - for (i = 0; i < slots_num; i++) - kfree(txq->entries[i].cmd); - kfree(txq->entries); - txq->entries = NULL; - - return -ENOMEM; - -} - -int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, bool cmd_queue) -{ - int ret; - u32 tfd_queue_max_size = - trans->trans_cfg->base_params->max_tfd_queue_size; - - txq->need_update = false; - - /* max_tfd_queue_size must be power-of-two size, otherwise - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
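/*
 * [Editor's sketch, not part of the patch] What the power-of-two WARN
 * below protects: with a power-of-two queue size, wrap-around
 * increment/decrement and the used-entry count reduce to a mask, and
 * stay well defined even when write_ptr has wrapped past read_ptr.
 */
#include <assert.h>

#define Q_SIZE 256	/* must be a power of two */

static int inc_wrap(int idx) { return (idx + 1) & (Q_SIZE - 1); }
static int dec_wrap(int idx) { return (idx - 1) & (Q_SIZE - 1); }

static int used_entries(int write_ptr, int read_ptr)
{
	return (write_ptr - read_ptr) & (Q_SIZE - 1);
}

int main(void)
{
	assert(inc_wrap(Q_SIZE - 1) == 0);
	assert(dec_wrap(0) == Q_SIZE - 1);
	assert(used_entries(3, Q_SIZE - 2) == 5);	/* across the wrap */
	return 0;
}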
*/ - if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), - "Max tfd queue size must be a power of two, but is %d", - tfd_queue_max_size)) - return -EINVAL; - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num); - if (ret) - return ret; - - spin_lock_init(&txq->lock); - - if (cmd_queue) { - static struct lock_class_key iwl_pcie_cmd_queue_lock_class; - - lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); - } - - __skb_queue_head_init(&txq->overflow_q); - - return 0; -} - -void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, - struct sk_buff *skb) -{ - struct page **page_ptr; - struct page *next; - - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - next = *page_ptr; - *page_ptr = NULL; - - while (next) { - struct page *tmp = next; - - next = *(void **)(page_address(next) + PAGE_SIZE - - sizeof(void *)); - __free_page(tmp); - } -} - static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -671,10 +310,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(trans_pcie, skb); + iwl_txq_free_tso_page(trans, skb); } iwl_pcie_txq_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; @@ -708,7 +347,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) */ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; @@ -728,7 +366,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans_pcie->tfd_size * + trans->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; @@ -774,7 +412,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, - trans_pcie->scd_bc_tbls.dma >> 10); + trans->txqs.scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. 
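One detail worth calling out in the hunk above: iwl_pcie_tx_start() writes the byte-count table base as "dma >> 10", which suggests SCD_DRAM_BASE_ADDR holds the address in 1 KiB units and therefore that the allocation must be 1 KiB aligned for the shift to be lossless. A standalone round-trip check of that (hedged) reading:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x12345c00;		/* hypothetical, 1 KiB aligned */

	assert((dma & ((1 << 10) - 1)) == 0);	/* alignment precondition */
	assert(((dma >> 10) << 10) == dma);	/* the >> 10 encoding is lossless */
	return 0;
}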
@@ -939,7 +577,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); - iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); + iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); } /* @@ -965,7 +603,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -1000,8 +638,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; - ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id], - slots_num, cmd_queue); + ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; @@ -1053,8 +691,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_256_ba_txq_size); - ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id], - slots_num, cmd_queue); + ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; @@ -1111,10 +749,9 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); - int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + int tfd_num = iwl_txq_get_cmd_index(txq, ssn); + int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release cmd queue*/ @@ -1137,9 +774,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, /*Since we free until index _not_ inclusive, the one before index is * the last we will free. 
This one must be used */ - last_to_free = iwl_queue_dec_wrap(trans, tfd_num); + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); - if (!iwl_queue_used(txq, last_to_free)) { + if (!iwl_txq_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, @@ -1153,28 +790,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, for (; read_ptr != tfd_num; - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), - read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(trans_pcie, skb); + iwl_txq_free_tso_page(trans, skb); __skb_queue_tail(skbs, skb); txq->entries[read_ptr].skb = NULL; if (!trans->trans_cfg->use_tfh) - iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); iwl_pcie_txq_free_tfd(trans, txq); } iwl_pcie_txq_progress(txq); - if (iwl_queue_space(trans, txq) > txq->low_mark && + if (iwl_txq_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; @@ -1204,17 +841,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans_pcie->dev_cmd_offs); + trans->txqs.dev_cmd_offs); /* * Note that we can very well be overflowing again. - * In that case, iwl_queue_space will be small again + * In that case, iwl_txq_space will be small again * and we won't wake mac80211's queue. */ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } - if (iwl_queue_space(trans, txq) > txq->low_mark) + if (iwl_txq_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); spin_lock_bh(&txq->lock); @@ -1295,11 +932,11 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) lockdep_assert_held(&txq->lock); - idx = iwl_pcie_get_cmd_index(txq, idx); - r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + idx = iwl_txq_get_cmd_index(txq, idx); + r = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || - (!iwl_queue_used(txq, idx))) { + (!iwl_txq_used(txq, idx))) { WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, @@ -1308,9 +945,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) return; } - for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; - r = iwl_queue_inc_wrap(trans, r)) { - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); + for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; + r = iwl_txq_inc_wrap(trans, r)) { + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", @@ -1543,11 +1180,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - if (WARN(!trans->wide_cmd_header && - group_id > IWL_ALWAYS_LONG_GROUP, - "unsupported wide command %#x\n", cmd->id)) - return -EINVAL; - if (group_id != 0) { copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); @@ -1627,7 +1259,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 
spin_lock_bh(&txq->lock); - if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -1636,7 +1268,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -1719,7 +1351,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_first_tb_dma(txq, idx), + iwl_txq_get_first_tb_dma(txq, idx), tb0_size, true); /* map first command fragment, if any remains */ @@ -1729,8 +1361,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); idx = -ENOMEM; goto out; } @@ -1753,8 +1385,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, phys_addr = dma_map_single(trans->dev, (void *)data, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); idx = -ENOMEM; goto out; } @@ -1783,7 +1415,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); @@ -1828,13 +1460,13 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - cmd_index = iwl_pcie_get_cmd_index(txq, index); + cmd_index = iwl_txq_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); - iwl_pcie_tfd_unmap(trans, meta, txq, index); + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. */ if (meta->flags & CMD_WANT_SKB) { @@ -2055,51 +1687,6 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); - struct page **page_ptr; - - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - - if (WARN_ON(*page_ptr)) - return NULL; - - if (!p->page) - goto alloc; - - /* - * Check if there's enough room on this page - * - * Note that we put a page chaining pointer *last* in the - * page - we need it somewhere, and if it's there then we - * avoid DMA mapping the last bits of the page which may - * trigger the 32-bit boundary hardware bug. - * - * (see also get_workaround_page() in tx-gen2.c) - */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - - sizeof(void *)) - goto out; - - /* We don't have enough room on this page, get a new one. 
*/ - __free_page(p->page); - -alloc: - p->page = alloc_page(GFP_ATOMIC); - if (!p->page) - return NULL; - p->pos = page_address(p->page); - /* set the chaining pointer to NULL */ - *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; -out: - *page_ptr = p->page; - get_page(p->page); - return p; -} - static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, bool ipv6, unsigned int len) { @@ -2142,8 +1729,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), - trans_pcie->tfd_size, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); @@ -2352,7 +1939,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) return -ENOMEM; @@ -2365,15 +1952,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (iwl_queue_space(trans, txq) < txq->high_mark) { - iwl_stop_queue(trans, txq); + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(trans, txq) < 3)) { + if (unlikely(iwl_txq_space(trans, txq) < 3)) { struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans_pcie->dev_cmd_offs); + trans->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -2402,7 +1989,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(txq->write_ptr))); - tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); @@ -2452,9 +2039,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, - txq->write_ptr), - trans_pcie->tfd_size, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -2486,10 +2072,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* building the A-MSDU might have changed this data, so memcpy it now */ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); - tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), - iwl_pcie_tfd_get_num_tbs(trans, tfd)); + iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), + iwl_txq_gen1_tfd_get_num_tbs(trans, + tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); @@ -2509,7 +2096,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, 
txq); @@ -2520,7 +2107,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_unlock(&txq->lock); return 0; out_err: - iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); spin_unlock(&txq->lock); return -1; } diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c new file mode 100644 index 000000000000..af0b27a68d84 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -0,0 +1,1529 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2020 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include <net/tso.h> +#include <linux/tcp.h> + +#include "iwl-debug.h" +#include "iwl-io.h" +#include "fw/api/tx.h" +#include "queue/tx.h" +#include "iwl-fh.h" +#include "iwl-scd.h" +#include <linux/dmapool.h> + +/* + * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels + */ +void iwl_txq_gen2_tx_stop(struct iwl_trans *trans) +{ + int txq_id; + + /* + * This function can be called before the op_mode disabled the + * queues. This happens when we have an rfkill interrupt. + * Since we stop Tx altogether - mark the queues as stopped. 
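/*
 * [Editor's sketch, not part of the patch] The memsets below drop every
 * queue's "used"/"stopped" bit at once on an rfkill stop; individual
 * queues are otherwise claimed with test_and_set_bit(). The same
 * bookkeeping in miniature, using plain word bitmaps instead of the
 * kernel's DECLARE_BITMAP helpers (queue count arbitrary):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define N_QUEUES 512
static uint64_t queue_used[N_QUEUES / 64];

static int test_and_set_used(int q)
{
	uint64_t mask = 1ULL << (q & 63);
	int was_set = !!(queue_used[q >> 6] & mask);

	queue_used[q >> 6] |= mask;
	return was_set;
}

int main(void)
{
	assert(test_and_set_used(17) == 0);		/* first claim wins */
	assert(test_and_set_used(17) == 1);		/* reuse is detected */
	memset(queue_used, 0, sizeof(queue_used));	/* the stop path */
	assert(test_and_set_used(17) == 0);
	return 0;
}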
+ */ + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) { + if (!trans->txqs.txq[txq_id]) + continue; + iwl_txq_gen2_unmap(trans, txq_id); + } +} + +/* + * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array + */ +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + u8 filled_tfd_size, num_fetch_chunks; + u16 len = byte_cnt; + __le16 bc_ent; + + if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) + return; + + filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + + num_tbs * sizeof(struct iwl_tfh_tb); + /* + * filled_tfd_size contains the number of filled bytes in the TFD. + * Dividing it by 64 will give the number of chunks to fetch + * to SRAM- 0 for one chunk, 1 for 2 and so on. + * If, for example, TFD contains only 3 TBs then 32 bytes + * of the TFD are used, and only one chunk of 64 bytes should + * be fetched + */ + num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; + + /* Starting from AX210, the HW expects bytes */ + WARN_ON(trans->txqs.bc_table_dword); + WARN_ON(len > 0x3FFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); + scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; + } else { + struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + + /* Before AX210, the HW expects DW */ + WARN_ON(!trans->txqs.bc_table_dword); + len = DIV_ROUND_UP(len, 4); + WARN_ON(len > 0xFFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); + scd_bc_tbl->tfd_offset[idx] = bc_ent; + } +} + +/* + * iwl_txq_inc_wr_ptr - Send new write index to hardware + */ +void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
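/*
 * [Editor's note, not part of the patch] The doorbell write below packs
 * two fields into one 32-bit word: queue id in the high half, write
 * pointer in the low half. Standalone illustration:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_wrptr(uint16_t txq_id, uint16_t write_ptr)
{
	return (uint32_t)write_ptr | ((uint32_t)txq_id << 16);
}

int main(void)
{
	uint32_t v = pack_wrptr(5, 0x1c0);

	assert((v & 0xffff) == 0x1c0);	/* write pointer */
	assert((v >> 16) == 5);		/* queue id */
	return 0;
}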
+ */ + iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); +} + +static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + return le16_to_cpu(tfd->num_tbs) & 0x1f; +} + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd) +{ + int i, num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); + + if (num_tbs > trans->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + return; + } + + /* first TB is never freed - it's the bidirectional DMA data */ + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + } + + tfd->num_tbs = 0; +} + +void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + + lockdep_assert_held(&txq->lock); + + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, idx)); + + /* free SKB */ + if (txq->entries) { + struct sk_buff *skb; + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } + } +} + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, + dma_addr_t addr, u16 len) +{ + int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); + struct iwl_tfh_tb *tb; + + /* + * Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... + */ + WARN(iwl_txq_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; + + /* Each TFD can point to a maximum max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans->txqs.tfd.max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct page **page_ptr; + struct page *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + /* set the chaining pointer to the previous page if there */ + *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. 
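/*
 * [Editor's sketch, not part of the patch] The workaround below keys
 * off iwl_txq_crosses_4g_boundary(). Its likely shape: compare the
 * upper 32 address bits of the first byte and of one-past-the-last
 * byte - "addr + len" rather than "addr + len - 1" - so a TB that
 * merely *ends* on the 2^32 boundary also trips it, matching the
 * hardware-bug description in the comment below.
 */
#include <assert.h>
#include <stdint.h>

static int crosses_4g_boundary(uint64_t addr, uint16_t len)
{
	return (uint32_t)(addr >> 32) != (uint32_t)((addr + len) >> 32);
}

int main(void)
{
	assert(!crosses_4g_boundary(0x00000000fffff000ULL, 0xfff));	/* stops short */
	assert(crosses_4g_boundary(0x00000000fffff000ULL, 0x1000));	/* ends on it */
	assert(crosses_4g_boundary(0x00000000fffffff0ULL, 0x100));	/* crosses it */
	return 0;
}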
+ */ +static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail. + */ + + if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + IWL_WARN(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, (unsigned long long)phys); + + ret = 0; +unmap: + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + +#ifdef CONFIG_INET +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb) +{ + struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); + struct page **page_ptr; + + page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; + + if (!p->page) + goto alloc; + + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - + sizeof(void *)) + goto out; + + /* We don't have enough room on this page, get a new one. 
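/*
 * [Editor's sketch, not part of the patch] get_page_hdr() above and
 * get_workaround_page() both reserve the last sizeof(void *) bytes of
 * each page for a chaining pointer, so every page a frame touched can
 * be freed later by walking the chain from skb->cb - and so the tail
 * word is never DMA-mapped, which keeps these buffers clear of the
 * boundary bug. The scheme simulated with malloc'd blocks:
 */
#include <stdlib.h>

#define PG_SIZE 4096

static void *chain_new_page(void **head)
{
	void *pg = malloc(PG_SIZE);

	if (!pg)
		return NULL;
	/* stash the previous head in the reserved tail word */
	*(void **)((char *)pg + PG_SIZE - sizeof(void *)) = *head;
	*head = pg;
	return pg;
}

static void chain_free(void *head)
{
	while (head) {
		void *next = *(void **)((char *)head + PG_SIZE - sizeof(void *));

		free(head);
		head = next;
	}
}

int main(void)
{
	void *head = NULL;

	chain_new_page(&head);
	chain_new_page(&head);
	chain_free(head);
	return 0;
}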
*/ + __free_page(p->page); + +alloc: + p->page = alloc_page(GFP_ATOMIC); + if (!p->page) + return NULL; + p->pos = page_address(p->page); + /* set the chaining pointer to NULL */ + *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; +out: + *page_ptr = p->page; + get_page(p->page); + return p; +} +#endif + +static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, int start_len, + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) +{ +#ifdef CONFIG_INET + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; + unsigned int mss = skb_shinfo(skb)->gso_size; + u16 length, amsdu_pad; + u8 *start_hdr; + struct iwl_tso_hdr_page *hdr_page; + struct tso_t tso; + + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), + &dev_cmd->hdr, start_len, 0); + + ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); + snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; + amsdu_pad = 0; + + /* total amount of header we may need for this A-MSDU */ + hdr_room = DIV_ROUND_UP(total_len, mss) * + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); + + /* Our device supports 9 segments at most, it will fit in 1 page */ + hdr_page = get_page_hdr(trans, hdr_room, skb); + if (!hdr_page) + return -ENOMEM; + + start_hdr = hdr_page->pos; + + /* + * Pull the ieee80211 header to be able to use TSO core, + * we will restore it for the tx_status flow. + */ + skb_pull(skb, hdr_len); + + /* + * Remove the length of all the headers that we don't actually + * have in the MPDU by themselves, but that we duplicate into + * all the different MSDUs inside the A-MSDU. + */ + le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); + + tso_start(skb, &tso); + + while (total_len) { + /* this is the data left for this subframe */ + unsigned int data_left = min_t(unsigned int, mss, total_len); + struct sk_buff *csum_skb = NULL; + unsigned int tb_len; + dma_addr_t tb_phys; + u8 *subf_hdrs_start = hdr_page->pos; + + total_len -= data_left; + + memset(hdr_page->pos, 0, amsdu_pad); + hdr_page->pos += amsdu_pad; + amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + + data_left)) & 0x3; + ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); + hdr_page->pos += ETH_ALEN; + ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); + hdr_page->pos += ETH_ALEN; + + length = snap_ip_tcp_hdrlen + data_left; + *((__be16 *)hdr_page->pos) = cpu_to_be16(length); + hdr_page->pos += sizeof(length); + + /* + * This will copy the SNAP as well which will be considered + * as MAC header. + */ + tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + + hdr_page->pos += snap_ip_tcp_hdrlen; + + tb_len = hdr_page->pos - start_hdr; + tb_phys = dma_map_single(trans->dev, start_hdr, + tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + dev_kfree_skb(csum_skb); + goto out_err; + } + /* + * No need for _with_wa, this is from the TSO page and + * we leave some space at the end of it so can't hit + * the buggy scenario. 
+		 */
+		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+					tb_phys, tb_len);
+		/* add this subframe's headers' length to the tx_cmd */
+		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+		/* prepare the start_hdr for the next subframe */
+		start_hdr = hdr_page->pos;
+
+		/* put the payload */
+		while (data_left) {
+			int ret;
+
+			tb_len = min_t(unsigned int, tso.size, data_left);
+			tb_phys = dma_map_single(trans->dev, tso.data,
+						 tb_len, DMA_TO_DEVICE);
+			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+							  tb_phys, tso.data,
+							  tb_len, NULL);
+			if (ret) {
+				dev_kfree_skb(csum_skb);
+				goto out_err;
+			}
+
+			data_left -= tb_len;
+			tso_build_data(skb, &tso, tb_len);
+		}
+	}
+
+	/* re-add the WiFi header */
+	skb_push(skb, hdr_len);
+
+	return 0;
+
+out_err:
+#endif
+	return -EINVAL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+					 struct iwl_txq *txq,
+					 struct iwl_device_tx_cmd *dev_cmd,
+					 struct sk_buff *skb,
+					 struct iwl_cmd_meta *out_meta,
+					 int hdr_len,
+					 int tx_cmd_len)
+{
+	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int len;
+	void *tb1_addr;
+
+	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+	/*
+	 * No need for _with_wa, the first TB allocation is aligned up
+	 * to a 64-byte boundary and thus can't be at the end or cross
+	 * a page boundary (much less a 2^32 boundary).
+	 */
+	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	/*
+	 * No need for _with_wa(), we ensure (via alignment) that the data
+	 * here can never cross or end at a page boundary.
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); + + if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) + goto out_err; + + /* building the A-MSDU might have changed this data, memcpy it now */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + return tfd; + +out_err: + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta) +{ + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + unsigned int fragsz = skb_frag_size(frag); + int ret; + + if (!fragsz) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + fragsz, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta); + if (ret) + return ret; + } + + return 0; +} + +static struct +iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len, + bool pad) +{ + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int len, tb1_len, tb2_len; + void *tb1_addr; + struct sk_buff *frag; + + tb_phys = iwl_txq_get_first_tb_dma(txq, idx); + + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + if (pad) + tb1_len = ALIGN(len, 4); + else + tb1_len = len; + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. 
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, hdr_len); + + /* set up TFD's third entry to point to remainder of skb's head */ + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + int ret; + + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL); + if (ret) + goto out_err; + } + + if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) + goto out_err; + + skb_walk_frags(skb, frag) { + int ret; + + tb_phys = dma_map_single(trans->dev, frag->data, + skb_headlen(frag), DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL); + if (ret) + goto out_err; + if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) + goto out_err; + } + + return tfd; + +out_err: + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static +struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + int len, hdr_len; + bool amsdu; + + /* There must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + + memset(tfd, 0, sizeof(*tfd)); + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + /* + * Only build A-MSDUs here if doing so by GSO, otherwise it may be + * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been + * built in the higher layers already. + */ + if (amsdu && skb_shinfo(skb)->gso_size) + return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, + out_meta, hdr_len, len); + return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, + hdr_len, len, !amsdu); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) +{ + unsigned int max; + unsigned int used; + + /* + * To avoid ambiguity between empty and completely full queues, there + * should always be less than max_tfd_queue_size elements in the queue. + * If q->n_window is smaller than max_tfd_queue_size, there is no need + * to reserve any queue entries for this purpose. + */ + if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) + max = q->n_window; + else + max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; + + /* + * max_tfd_queue_size is a power of 2, so the following is equivalent to + * modulo by max_tfd_queue_size and is well defined. 
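+	 * E.g. with max_tfd_queue_size = 256, write_ptr = 5 and
+	 * read_ptr = 250 the subtraction wraps correctly:
+	 * (5 - 250) & 255 = 11 entries currently in use.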
+ */ + used = (q->write_ptr - q->read_ptr) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + if (WARN_ON(used > max)) + return 0; + + return max - used; +} + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id) +{ + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + u16 cmd_len; + int idx; + void *tfd; + + if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", txq_id)) + return -EINVAL; + + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && + __skb_linearize(skb)) + return -ENOMEM; + + spin_lock(&txq->lock); + + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_txq_space(trans, txq) < 3)) { + struct iwl_device_tx_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans->txqs.dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + spin_unlock(&txq->lock); + return 0; + } + } + + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + + /* Set up driver data for this TFD */ + txq->entries[idx].skb = skb; + txq->entries[idx].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(idx))); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[idx].meta; + out_meta->flags = 0; + + tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); + if (!tfd) { + spin_unlock(&txq->lock); + return -1; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen2->len); + } + + /* Set up entry for this TFD in Tx byte-count array */ + iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, + iwl_txq_gen2_get_num_tbs(trans, tfd)); + + /* start timer if queue currently empty */ + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + + /* Tell device the write index *just past* this latest filled TFD */ + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); + iwl_txq_inc_wr_ptr(trans, txq); + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually. 
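+	 * (If the queue was nearly full, the frame was instead parked on
+	 * overflow_q above, with the same return value, and is submitted
+	 * again once entries have been reclaimed.)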
+	 */
+	spin_unlock(&txq->lock);
+	return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_txq *txq = trans->txqs.txq[txq_id];
+
+	spin_lock_bh(&txq->lock);
+	while (txq->write_ptr != txq->read_ptr) {
+		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+				   txq_id, txq->read_ptr);
+
+		if (txq_id != trans->txqs.cmd.q_id) {
+			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+			struct sk_buff *skb = txq->entries[idx].skb;
+
+			if (WARN_ON_ONCE(!skb))
+				continue;
+
+			iwl_txq_free_tso_page(trans, skb);
+		}
+		iwl_txq_gen2_free_tfd(trans, txq);
+		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+	}
+
+	while (!skb_queue_empty(&txq->overflow_q)) {
+		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+		iwl_op_mode_free_skb(trans->op_mode, skb);
+	}
+
+	spin_unlock_bh(&txq->lock);
+
+	/* just in case - this queue may have been stopped */
+	iwl_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+				     struct iwl_txq *txq)
+{
+	struct device *dev = trans->dev;
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->tfds) {
+		dma_free_coherent(dev,
+				  trans->txqs.tfd.size * txq->n_window,
+				  txq->tfds, txq->dma_addr);
+		dma_free_coherent(dev,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
+				  txq->first_tb_bufs, txq->first_tb_dma);
+	}
+
+	kfree(txq->entries);
+	if (txq->bc_tbl.addr)
+		dma_pool_free(trans->txqs.bc_pool,
+			      txq->bc_tbl.addr, txq->bc_tbl.dma);
+	kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * Free the queue descriptor structure itself.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_txq *txq;
+	int i;
+
+	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+		      "queue %d out of range", txq_id))
+		return;
+
+	txq = trans->txqs.txq[txq_id];
+
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_txq_gen2_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq_id == trans->txqs.cmd.q_id)
+		for (i = 0; i < txq->n_window; i++) {
+			kfree_sensitive(txq->entries[i].cmd);
+			kfree_sensitive(txq->entries[i].free_buf);
+		}
+	del_timer_sync(&txq->stuck_timer);
+
+	iwl_txq_gen2_free_memory(trans, txq);
+
+	trans->txqs.txq[txq_id] = NULL;
+
+	clear_bit(txq_id, trans->txqs.queue_used);
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+	q->n_window = slots_num;
+
+	/* slots_num must be power-of-two size, otherwise
+	 * iwl_txq_get_cmd_index is broken. */
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
+
+	q->low_mark = q->n_window / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_window / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = 0;
+	q->read_ptr = 0;
+
+	return 0;
+}
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+		 bool cmd_queue)
+{
+	int ret;
+	u32 tfd_queue_max_size =
+		trans->trans_cfg->base_params->max_tfd_queue_size;
+
+	txq->need_update = false;
+
+	/* max_tfd_queue_size must be power-of-two size, otherwise
+	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
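+	 * (Both helpers advance the index and mask it with
+	 * (max_tfd_queue_size - 1), which only implements the intended
+	 * wrap-around for powers of two: e.g. for a size of 256,
+	 * iwl_txq_inc_wrap() turns index 255 into 256 & 255 = 0.)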
*/ + if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), + "Max tfd queue size must be a power of two, but is %d", + tfd_queue_max_size)) + return -EINVAL; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + ret = iwl_queue_init(txq, slots_num); + if (ret) + return ret; + + spin_lock_init(&txq->lock); + + if (cmd_queue) { + static struct lock_class_key iwl_txq_cmd_queue_lock_class; + + lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); + } + + __skb_queue_head_init(&txq->overflow_q); + + return 0; +} + +void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) +{ + struct page **page_ptr; + struct page *next; + + page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); + next = *page_ptr; + *page_ptr = NULL; + + while (next) { + struct page *tmp = next; + + next = *(void **)(page_address(next) + PAGE_SIZE - + sizeof(void *)); + __free_page(tmp); + } +} + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) +{ + u32 txq_id = txq->id; + u32 status; + bool active; + u8 fifo; + + if (trans->trans_cfg->use_tfh) { + IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, + txq->read_ptr, txq->write_ptr); + /* TODO: access new SCD registers and dump them */ + return; + } + + status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); + fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + + IWL_ERR(trans, + "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", + txq_id, active ? "" : "in", fifo, + jiffies_to_msecs(txq->wd_timeout), + txq->read_ptr, txq->write_ptr, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); +} + +static void iwl_txq_stuck_timer(struct timer_list *t) +{ + struct iwl_txq *txq = from_timer(txq, t, stuck_timer); + struct iwl_trans *trans = txq->trans; + + spin_lock(&txq->lock); + /* check if triggered erroneously */ + if (txq->read_ptr == txq->write_ptr) { + spin_unlock(&txq->lock); + return; + } + spin_unlock(&txq->lock); + + iwl_txq_log_scd_error(trans, txq); + + iwl_force_nmi(trans); +} + +int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, + bool cmd_queue) +{ + size_t tfd_sz = trans->txqs.tfd.size * + trans->trans_cfg->base_params->max_tfd_queue_size; + size_t tb0_buf_sz; + int i; + + if (WARN_ON(txq->entries || txq->tfds)) + return -EINVAL; + + if (trans->trans_cfg->use_tfh) + tfd_sz = trans->txqs.tfd.size * slots_num; + + timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); + txq->trans = trans; + + txq->n_window = slots_num; + + txq->entries = kcalloc(slots_num, + sizeof(struct iwl_pcie_txq_entry), + GFP_KERNEL); + + if (!txq->entries) + goto error; + + if (cmd_queue) + for (i = 0; i < slots_num; i++) { + txq->entries[i].cmd = + kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->entries[i].cmd) + goto error; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, + &txq->dma_addr, GFP_KERNEL); + if (!txq->tfds) + goto error; + + BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); + + tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; + + txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, + &txq->first_tb_dma, + 
GFP_KERNEL); + if (!txq->first_tb_bufs) + goto err_free_tfds; + + return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); +error: + if (txq->entries && cmd_queue) + for (i = 0; i < slots_num; i++) + kfree(txq->entries[i].cmd); + kfree(txq->entries); + txq->entries = NULL; + + return -ENOMEM; +} + +static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, + struct iwl_txq **intxq, int size, + unsigned int timeout) +{ + size_t bc_tbl_size, bc_tbl_entries; + struct iwl_txq *txq; + int ret; + + WARN_ON(!trans->txqs.bc_tbl_size); + + bc_tbl_size = trans->txqs.bc_tbl_size; + bc_tbl_entries = bc_tbl_size / sizeof(u16); + + if (WARN_ON(size > bc_tbl_entries)) + return -EINVAL; + + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) + return -ENOMEM; + + txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, + &txq->bc_tbl.dma); + if (!txq->bc_tbl.addr) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + kfree(txq); + return -ENOMEM; + } + + ret = iwl_txq_alloc(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue alloc failed\n"); + goto error; + } + ret = iwl_txq_init(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue init failed\n"); + goto error; + } + + txq->wd_timeout = msecs_to_jiffies(timeout); + + *intxq = txq; + return 0; + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, + struct iwl_host_cmd *hcmd) +{ + struct iwl_tx_queue_cfg_rsp *rsp; + int ret, qid; + u32 wr_ptr; + + if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != + sizeof(*rsp))) { + ret = -EINVAL; + goto error_free_resp; + } + + rsp = (void *)hcmd->resp_pkt->data; + qid = le16_to_cpu(rsp->queue_number); + wr_ptr = le16_to_cpu(rsp->write_pointer); + + if (qid >= ARRAY_SIZE(trans->txqs.txq)) { + WARN_ONCE(1, "queue index %d unsupported", qid); + ret = -EIO; + goto error_free_resp; + } + + if (test_and_set_bit(qid, trans->txqs.queue_used)) { + WARN_ONCE(1, "queue %d already used", qid); + ret = -EIO; + goto error_free_resp; + } + + txq->id = qid; + trans->txqs.txq[qid] = txq; + wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + /* Place first TFD at index corresponding to start sequence number */ + txq->read_ptr = wr_ptr; + txq->write_ptr = wr_ptr; + + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); + + iwl_free_resp(hcmd); + return qid; + +error_free_resp: + iwl_free_resp(hcmd); + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, + int cmd_id, int size, unsigned int timeout) +{ + struct iwl_txq *txq = NULL; + struct iwl_tx_queue_cfg_cmd cmd = { + .flags = flags, + .sta_id = sta_id, + .tid = tid, + }; + struct iwl_host_cmd hcmd = { + .id = cmd_id, + .len = { sizeof(cmd) }, + .data = { &cmd, }, + .flags = CMD_WANT_SKB, + }; + int ret; + + ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout); + if (ret) + return ret; + + cmd.tfdq_addr = cpu_to_le64(txq->dma_addr); + cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + goto error; + + return iwl_txq_alloc_response(trans, txq, &hcmd); + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) +{ + if (WARN(queue >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", queue)) + return; + + /* + * Upon HW Rfkill 
- we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_txq_gen2_unmap(trans, queue);

	iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]);

	trans->txqs.txq[queue] = NULL;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
{
	int i;

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
		if (!trans->txqs.txq[i])
			continue;

		iwl_txq_gen2_free(trans, i);
	}
}

int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
	struct iwl_txq *queue;
	int ret;

	/* alloc and init the tx queue */
	if (!trans->txqs.txq[txq_id]) {
		queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		if (!queue) {
			IWL_ERR(trans, "Not enough memory for tx queue\n");
			return -ENOMEM;
		}
		trans->txqs.txq[txq_id] = queue;
		ret = iwl_txq_alloc(trans, queue, queue_size, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		queue = trans->txqs.txq[txq_id];
	}

	ret = iwl_txq_init(trans, queue, queue_size,
			   (txq_id == trans->txqs.cmd.q_id));
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans->txqs.txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans->txqs.queue_used);

	return 0;

error:
	iwl_txq_gen2_tx_free(trans);
	return ret;
}

static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
						      void *_tfd, u8 idx)
{
	struct iwl_tfd *tfd;
	struct iwl_tfd_tb *tb;
	dma_addr_t addr;
	dma_addr_t hi_len;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	}

	tfd = _tfd;
	tb = &tfd->tbs[idx];
	addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) <= sizeof(u32))
		return addr;

	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

	/*
	 * shift by 16 twice to avoid warnings on 32-bit
	 * (where this code never runs anyway due to the
	 * if statement above)
	 */
	return addr | ((hi_len << 16) << 16);
}

void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_txq *txq, int index)
{
	int i, num_tbs;
	void *tfd = iwl_txq_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_txq_gen1_tfd_tb_get_addr(trans,
								    tfd, i),
				       iwl_txq_gen1_tfd_tb_get_len(trans,
								   tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_txq_gen1_tfd_tb_get_addr(trans,
								      tfd, i),
					 iwl_txq_gen1_tfd_tb_get_len(trans,
								     tfd, i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

tfd_fh->num_tbs = 0; + } +} + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +/* + * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwlagn_scd_bc_tbl *scd_bc_tbl; + int write_ptr = txq->write_ptr; + int txq_id = txq->id; + u8 sec_ctl = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + u8 sta_id = tx_cmd->sta_id; + + scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; + + sec_ctl = tx_cmd->sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += IEEE80211_CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += IEEE80211_TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; + break; + } + if (trans->txqs.bc_table_dword) + len = DIV_ROUND_UP(len, 4); + + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + + bc_ent = cpu_to_le16(len | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = + bc_ent; +} + +void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; + int txq_id = txq->id; + int read_ptr = txq->read_ptr; + u8 sta_id = 0; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != trans->txqs.cmd.q_id) + sta_id = tx_cmd->sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = + bc_ent; +} diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h new file mode 100644 index 000000000000..c67577dfa21d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -0,0 +1,230 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2020 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2020 Intel Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_trans_queue_tx_h__ +#define __iwl_trans_queue_tx_h__ +#include "iwl-fh.h" +#include "fw/api/tx.h" + +struct iwl_tso_hdr_page { + struct page *page; + u8 *pos; +}; + +static inline dma_addr_t +iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) +{ + return txq->first_tb_dma + + sizeof(struct iwl_pcie_first_tb_buf) * idx; +} + +static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) +{ + return index & (q->n_window - 1); +} + +void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id); + +static inline void iwl_wake_queue(struct iwl_trans *trans, + struct iwl_txq *txq) +{ + if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->id); + } +} + +static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, int idx) +{ + if (trans->trans_cfg->use_tfh) + idx = iwl_txq_get_cmd_index(txq, idx); + + return txq->tfds + trans->txqs.tfd.size * idx; +} + +int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, + bool cmd_queue); +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. 
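+ * E.g. phys = 0xfffff000 with len = 0x2000: upper_32_bits() of the
+ * start is 0 but of the end (0x100001000) is 1, so this TB would
+ * cross a 4 GB boundary and must take the copy workaround.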
+ */ +static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); + +static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) +{ + if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); + } else { + IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", + txq->id); + } +} + +/** + * iwl_txq_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + */ +static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) +{ + return ++index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +/** + * iwl_txq_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + */ +static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) +{ + return --index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +static inline bool iwl_txq_used(const struct iwl_txq *q, int i) +{ + int index = iwl_txq_get_cmd_index(q, i); + int r = iwl_txq_get_cmd_index(q, q->read_ptr); + int w = iwl_txq_get_cmd_index(q, q->write_ptr); + + return w >= r ? + (index >= r && index < w) : + !(index < r && index >= w); +} + +void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb); + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd, dma_addr_t addr, + u16 len); + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd); + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, + __le16 flags, u8 sta_id, u8 tid, + int cmd_id, int size, + unsigned int timeout); + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id); + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); +void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); +void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); +void iwl_txq_gen2_tx_stop(struct iwl_trans *trans); +void iwl_txq_gen2_tx_free(struct iwl_trans *trans); +int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, + bool cmd_queue); +int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size); +#ifdef CONFIG_INET +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb); +#endif +static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, + void *_tfd) +{ + struct iwl_tfd *tfd; + + if (trans->trans_cfg->use_tfh) { + struct iwl_tfh_tfd *tfd = _tfd; + + return le16_to_cpu(tfd->num_tbs) & 0x1f; + } + + tfd = (struct iwl_tfd *)_tfd; + return tfd->num_tbs & 0x1f; +} + +static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, + void *_tfd, u8 idx) +{ + struct iwl_tfd *tfd; + struct iwl_tfd_tb *tb; + + if (trans->trans_cfg->use_tfh) { + struct iwl_tfh_tfd *tfd = _tfd; + struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->tb_len); + } + + tfd = (struct iwl_tfd *)_tfd; + tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_txq *txq, int index); +void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq); +void 
iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs); +#endif /* __iwl_trans_queue_tx_h__ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index f40d8c3c3d9e..f3ccbd2b1084 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) default: pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid, vif->vifid, vif->wdev.iftype); + dev_kfree_skb(cmd_skb); ret = -EINVAL; goto out; } @@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, break; default: pr_err("unsupported iftype %d\n", vif->wdev.iftype); + dev_kfree_skb(cmd_skb); ret = -EINVAL; goto out; } diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 985cf5d60615..3852c4f0ac0b 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -229,7 +229,8 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v) if (!buf) return -ENOMEM; - ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf); + ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, offset, + buf_size, (u32 *)buf); if (ret) { rtw_err(rtwdev, "failed to dump rsvd page\n"); vfree(buf); @@ -543,6 +544,28 @@ static void rtw_print_rate(struct seq_file *m, u8 rate) } } +#define case_REGD(src) \ + case RTW_REGD_##src: return #src + +static const char *rtw_get_regd_string(u8 regd) +{ + switch (regd) { + case_REGD(FCC); + case_REGD(MKK); + case_REGD(ETSI); + case_REGD(IC); + case_REGD(KCC); + case_REGD(ACMA); + case_REGD(CHILE); + case_REGD(UKRAINE); + case_REGD(MEXICO); + case_REGD(CN); + case_REGD(WW); + default: + return "Unknown"; + } +} + static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; @@ -554,6 +577,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) u8 ch = hal->current_channel; u8 regd = rtwdev->regd.txpwr_regd; + seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd)); seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n", "path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem"); diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 63b00bc19000..042015bc8055 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -193,6 +193,15 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, } EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe); +void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev) +{ + if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER) + rtw_fw_recovery(rtwdev); + else + rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n"); +} +EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr); + static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c) { @@ -1404,29 +1413,16 @@ free: return ret; } -int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev, - u32 offset, u32 size, u32 *buf) +static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size, + u32 *buf, u32 residue, u16 start_pg) { - struct rtw_fifo_conf *fifo = &rtwdev->fifo; - u32 residue, i; - u16 start_pg; + u32 i; u16 idx = 0; u16 ctl; u8 rcr; - if (size & 0x3) { - rtw_warn(rtwdev, "should be 4-byte aligned\n"); - return -EINVAL; - } - - offset += fifo->rsvd_boundary << 
TX_PAGE_SIZE_SHIFT; - residue = offset & (FIFO_PAGE_SIZE - 1); - start_pg = offset >> FIFO_PAGE_SIZE_SHIFT; - start_pg += RSVD_PAGE_START_ADDR; - rcr = rtw_read8(rtwdev, REG_RCR + 2); ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000; - /* disable rx clock gate */ rtw_write8(rtwdev, REG_RCR, rcr | BIT(3)); @@ -1448,6 +1444,64 @@ int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev, out: rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl); rtw_write8(rtwdev, REG_RCR + 2, rcr); +} + +static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel, + u32 offset, u32 size, u32 *buf) +{ + struct rtw_chip_info *chip = rtwdev->chip; + u32 start_pg, residue; + + if (sel >= RTW_FW_FIFO_MAX) { + rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n"); + return; + } + if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE) + offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT; + residue = offset & (FIFO_PAGE_SIZE - 1); + start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel]; + + rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg); +} + +static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev, + enum rtw_fw_fifo_sel sel, + u32 start_addr, u32 size) +{ + switch (sel) { + case RTW_FW_FIFO_SEL_TX: + case RTW_FW_FIFO_SEL_RX: + if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel]) + return false; + /*fall through*/ + default: + return true; + } +} + +int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, + u32 *buffer) +{ + if (!rtwdev->chip->fw_fifo_addr) { + rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n"); + return -ENOTSUPP; + } + + if (size == 0 || !buffer) + return -EINVAL; + + if (size & 0x3) { + rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n"); + return -EINVAL; + } + + if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n"); + return -EINVAL; + } + + rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer); + return 0; } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 686dcd3bbda6..08644540d259 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -16,7 +16,6 @@ #define FIFO_PAGE_SIZE_SHIFT 12 #define FIFO_PAGE_SIZE 4096 -#define RSVD_PAGE_START_ADDR 0x780 #define FIFO_DUMP_ADDR 0x8000 #define DLFW_PAGE_SIZE_SHIFT_LEGACY 12 @@ -508,6 +507,20 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16)) +#define GET_FW_DUMP_LEN(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(15, 0)) +#define GET_FW_DUMP_SEQ(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(22, 16)) +#define GET_FW_DUMP_MORE(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), BIT(23)) +#define GET_FW_DUMP_VERSION(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(31, 24)) +#define GET_FW_DUMP_TLV_TYPE(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(15, 0)) +#define GET_FW_DUMP_TLV_LEN(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16)) +#define GET_FW_DUMP_TLV_VAL(_header) \ + le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0)) static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) { u32 pkt_offset; @@ -564,5 +577,8 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid); void rtw_fw_channel_switch(struct rtw_dev *rtwdev, 
bool enable); void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c); +void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev); +int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, + u32 *buffer); #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 6b199152abcf..c92fba2fa480 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -358,13 +358,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_leave_lps_deep(rtwdev); if (changed & BSS_CHANGED_ASSOC) { - enum rtw_net_type net_type; - + rtw_vif_assoc_changed(rtwvif, conf); if (conf->assoc) { rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH); - net_type = RTW_NET_MGD_LINKED; - rtwvif->aid = conf->aid; rtw_fw_download_rsvd_page(rtwdev); rtw_send_rsvd_page_h2c(rtwdev); rtw_coex_media_status_notify(rtwdev, conf->assoc); @@ -372,12 +369,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_bf_assoc(rtwdev, vif, conf); } else { rtw_leave_lps(rtwdev); - net_type = RTW_NET_NO_LINK; - rtwvif->aid = 0; rtw_bf_disassoc(rtwdev, vif, conf); } - rtwvif->net_type = net_type; config |= PORT_SET_NET_TYPE; config |= PORT_SET_AID; } @@ -429,56 +423,17 @@ static int rtw_ops_conf_tx(struct ieee80211_hw *hw, return 0; } -static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) -{ - unsigned long mac_id; - - mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); - if (mac_id < RTW_MAX_MAC_ID_NUM) - set_bit(mac_id, rtwdev->mac_id_map); - - return mac_id; -} - -static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) -{ - clear_bit(mac_id, rtwdev->mac_id_map); -} - static int rtw_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = hw->priv; - struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; - int i; int ret = 0; mutex_lock(&rtwdev->mutex); - - si->mac_id = rtw_acquire_macid(rtwdev); - if (si->mac_id >= RTW_MAX_MAC_ID_NUM) { - ret = -ENOSPC; - goto out; - } - - si->sta = sta; - si->vif = vif; - si->init_ra_lv = 1; - ewma_rssi_init(&si->avg_rssi); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - rtw_txq_init(rtwdev, sta->txq[i]); - - rtw_update_sta_info(rtwdev, si); - rtw_fw_media_status_report(rtwdev, si->mac_id, true); - - rtwdev->sta_cnt++; - - rtw_info(rtwdev, "sta %pM joined with macid %d\n", - sta->addr, si->mac_id); - -out: + ret = rtw_sta_add(rtwdev, sta, vif); mutex_unlock(&rtwdev->mutex); + return ret; } @@ -487,25 +442,11 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = hw->priv; - struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; - int i; mutex_lock(&rtwdev->mutex); - - rtw_release_macid(rtwdev, si->mac_id); - rtw_fw_media_status_report(rtwdev, si->mac_id, false); - - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - rtw_txq_cleanup(rtwdev, sta->txq[i]); - - kfree(si->mask); - - rtwdev->sta_cnt--; - - rtw_info(rtwdev, "sta %pM with macid %d left\n", - sta->addr, si->mac_id); - + rtw_sta_remove(rtwdev, sta, true); mutex_unlock(&rtwdev->mutex); + return 0; } @@ -845,6 +786,17 @@ static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled) } #endif +static void rtw_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) + 
clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags); + mutex_unlock(&rtwdev->mutex); +} + const struct ieee80211_ops rtw_ops = { .tx = rtw_ops_tx, .wake_tx_queue = rtw_ops_wake_tx_queue, @@ -871,6 +823,7 @@ const struct ieee80211_ops rtw_ops = { .set_bitrate_mask = rtw_ops_set_bitrate_mask, .set_antenna = rtw_ops_set_antenna, .get_antenna = rtw_ops_get_antenna, + .reconfig_complete = rtw_reconfig_complete, #ifdef CONFIG_PM .suspend = rtw_ops_suspend, .resume = rtw_ops_resume, diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index cc82c80f0433..565efd880624 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -259,6 +259,198 @@ static void rtw_c2h_work(struct work_struct *work) } } +static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) +{ + unsigned long mac_id; + + mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); + if (mac_id < RTW_MAX_MAC_ID_NUM) + set_bit(mac_id, rtwdev->mac_id_map); + + return mac_id; +} + +int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + int i; + + si->mac_id = rtw_acquire_macid(rtwdev); + if (si->mac_id >= RTW_MAX_MAC_ID_NUM) + return -ENOSPC; + + si->sta = sta; + si->vif = vif; + si->init_ra_lv = 1; + ewma_rssi_init(&si->avg_rssi); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + rtw_txq_init(rtwdev, sta->txq[i]); + + rtw_update_sta_info(rtwdev, si); + rtw_fw_media_status_report(rtwdev, si->mac_id, true); + + rtwdev->sta_cnt++; + rtw_info(rtwdev, "sta %pM joined with macid %d\n", + sta->addr, si->mac_id); + + return 0; +} + +void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + bool fw_exist) +{ + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + int i; + + rtw_release_macid(rtwdev, si->mac_id); + if (fw_exist) + rtw_fw_media_status_report(rtwdev, si->mac_id, false); + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + rtw_txq_cleanup(rtwdev, sta->txq[i]); + + kfree(si->mask); + + rtwdev->sta_cnt--; + rtw_info(rtwdev, "sta %pM with macid %d left\n", + sta->addr, si->mac_id); +} + +static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) +{ + u32 size = rtwdev->chip->fw_rxff_size; + u32 *buf; + u8 seq; + bool ret = true; + + buf = vmalloc(size); + if (!buf) + goto exit; + + if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n"); + goto free_buf; + } + + if (GET_FW_DUMP_LEN(buf) == 0) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n"); + goto free_buf; + } + + seq = GET_FW_DUMP_SEQ(buf); + if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) { + rtw_dbg(rtwdev, RTW_DBG_FW, + "fw crash dump's seq is wrong: %d\n", seq); + goto free_buf; + } + if (seq == 0 && + (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE || + GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN || + GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) { + rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n"); + goto free_buf; + } + + print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size); + + if (GET_FW_DUMP_MORE(buf) == 1) { + rtwdev->fw.prev_dump_seq = seq; + ret = false; + } + +free_buf: + vfree(buf); +exit: + rtw_write8(rtwdev, REG_MCU_TST_CFG, 0); + + return ret; +} + +void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, + struct ieee80211_bss_conf *conf) +{ + if (conf && conf->assoc) { + rtwvif->aid = conf->aid; + rtwvif->net_type = RTW_NET_MGD_LINKED; + } else { + 
rtwvif->aid = 0;
+		rtwvif->net_type = RTW_NET_NO_LINK;
+	}
+}
+
+static void rtw_reset_key_iter(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key,
+			       void *data)
+{
+	struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+	struct rtw_sec_desc *sec = &rtwdev->sec;
+
+	rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
+}
+
+static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+	struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+	if (rtwdev->sta_cnt == 0) {
+		rtw_warn(rtwdev, "sta count before reset should not be 0\n");
+		return;
+	}
+	rtw_sta_remove(rtwdev, sta, false);
+}
+
+static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+	rtw_bf_disassoc(rtwdev, vif, NULL);
+	rtw_vif_assoc_changed(rtwvif, NULL);
+	rtw_txq_cleanup(rtwdev, vif->txq);
+}
+
+void rtw_fw_recovery(struct rtw_dev *rtwdev)
+{
+	if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+		ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work);
+}
+
+static void rtw_fw_recovery_work(struct work_struct *work)
+{
+	struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+					      fw_recovery_work);
+
+	/* A false return from rtw_fw_dump_crash_log() indicates that there
+	 * is more log to dump. The driver sets 0x1cf[7:0] = 0x1 to tell the
+	 * firmware to dump the remaining part of the log, and the firmware
+	 * will trigger an IMR_C2HCMD interrupt to inform the driver once
+	 * the log is ready.
+	 */
+	if (!rtw_fw_dump_crash_log(rtwdev)) {
+		rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+		return;
+	}
+	rtwdev->fw.prev_dump_seq = 0;
+
+	WARN(1, "firmware crash, start reset and recover\n");
+
+	mutex_lock(&rtwdev->mutex);
+
+	set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+	rcu_read_lock();
+	rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev);
+	rcu_read_unlock();
+	rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev);
+	rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev);
+	rtw_enter_ips(rtwdev);
+
+	mutex_unlock(&rtwdev->mutex);
+
+	ieee80211_restart_hw(rtwdev->hw);
+}
+
 struct rtw_txq_ba_iter_data {
 };
 
@@ -1431,6 +1623,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
 	INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work);
 	INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work);
 	INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+	INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
 	INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
 	skb_queue_head_init(&rtwdev->c2h_queue);
 	skb_queue_head_init(&rtwdev->coex.queue);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 276b5d381467..ffb02e614217 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -359,6 +359,7 @@ enum rtw_flags {
 	RTW_FLAG_DIG_DISABLE,
 	RTW_FLAG_BUSY_TRAFFIC,
 	RTW_FLAG_WOWLAN,
+	RTW_FLAG_RESTARTING,
 	NUM_OF_RTW_FLAGS,
 };
 
@@ -1082,6 +1083,17 @@ enum rtw_wlan_cpu {
 	RTW_WCPU_11N,
 };
 
+enum rtw_fw_fifo_sel {
+	RTW_FW_FIFO_SEL_TX,
+	RTW_FW_FIFO_SEL_RX,
+	RTW_FW_FIFO_SEL_RSVD_PAGE,
+	RTW_FW_FIFO_SEL_REPORT,
+	RTW_FW_FIFO_SEL_LLT,
+	RTW_FW_FIFO_SEL_RXBUF_FW,
+
+	RTW_FW_FIFO_MAX,
+};
+
 /* hardware configuration for each IC */
 struct rtw_chip_info {
 	struct rtw_chip_ops *ops;
@@ -1098,6 +1110,7 @@ struct rtw_chip_info {
 	u32 ptct_efuse_size;
 	u32 txff_size;
 	u32 rxff_size;
+	u32 fw_rxff_size;
 	u8 band;
 	u8 page_size;
 	u8 csi_buf_pg_num;
@@ -1108,6 +1121,8 @@ struct 
rtw_chip_info { bool rx_ldpc; u8 max_power_index; + u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; + bool ht_supported; bool vht_supported; u8 lps_deep_mode_supported; @@ -1606,6 +1621,9 @@ struct rtw_fifo_conf { const struct rtw_rqpn *rqpn; }; +#define FW_CD_TYPE 0xffff +#define FW_CD_LEN 4 +#define FW_CD_VAL 0xaabbccdd struct rtw_fw_state { const struct firmware *firmware; struct rtw_dev *rtwdev; @@ -1614,6 +1632,7 @@ struct rtw_fw_state { u8 sub_version; u8 sub_index; u16 h2c_version; + u8 prev_dump_seq; }; struct rtw_hal { @@ -1699,6 +1718,7 @@ struct rtw_dev { /* c2h cmd queue & handler work */ struct sk_buff_head c2h_queue; struct work_struct c2h_work; + struct work_struct fw_recovery_work; /* used to protect txqs list */ spinlock_t txq_lock; @@ -1799,6 +1819,11 @@ static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev) return rtwdev->chip->rx_ldpc; } +static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id) +{ + clear_bit(mac_id, rtwdev->mac_id_map); +} + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); @@ -1821,5 +1846,12 @@ void rtw_core_deinit(struct rtw_dev *rtwdev); int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw); u16 rtw_desc_to_bitrate(u8 desc_rate); +void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, + struct ieee80211_bss_conf *conf); +int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + struct ieee80211_vif *vif); +void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, + bool fw_exist); +void rtw_fw_recovery(struct rtw_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 135dd331691c..676d861aaf99 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -389,6 +389,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev) IMR_VODOK | IMR_ROK | IMR_BCNDMAINT_E | + IMR_C2HCMD | 0; rtwpci->irq_mask[1] = IMR_TXFOVW | 0; @@ -1079,6 +1080,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C); if (irq_status[0] & IMR_ROK) rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU); + if (unlikely(irq_status[0] & IMR_C2HCMD)) + rtw_fw_c2h_cmd_isr(rtwdev); /* all of the jobs for this interrupt have been done */ rtw_pci_enable_interrupt(rtwdev, rtwpci); @@ -1598,6 +1601,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev) if (chip->ops->shutdown) chip->ops->shutdown(rtwdev); + + pci_set_power_state(pdev, PCI_D3hot); } EXPORT_SYMBOL(rtw_pci_shutdown); diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 024c2bc275cb..ca17aa9cf7dc 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -9,8 +9,8 @@ #define RTK_BEQ_TX_DESC_NUM 256 #define RTK_MAX_RX_DESC_NUM 512 -/* 8K + rx desc size */ -#define RTK_PCI_RX_BUF_SIZE (8192 + 24) +/* 11K + rx desc size */ +#define RTK_PCI_RX_BUF_SIZE (11454 + 24) #define RTK_PCI_CTRL 0x300 #define BIT_RST_TRXDMA_INTF BIT(20) diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 8f468d6b5f78..86b94c008a27 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -126,6 +126,9 @@ BIT_WINTINI_RDY | BIT_RAM_DL_SEL) #define FW_READY_MASK 0xffff 
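+/* rtw_fw_c2h_cmd_isr() treats VAL_FW_TRIGGER in this register as a
+ * firmware-crash indication and schedules the recovery work; the
+ * crash-dump path clears the register once the log has been read out.
+ */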
+#define REG_MCU_TST_CFG 0x84 +#define VAL_FW_TRIGGER 0x1 + #define REG_EFUSE_ACCESS 0x00CF #define EFUSE_ACCESS_ON 0x69 #define EFUSE_ACCESS_OFF 0x00 @@ -616,6 +619,8 @@ #define BIT_ANAPAR_BTPS BIT(22) #define REG_RSTB_SEL 0x1c38 +#define REG_HRCV_MSG 0x1cf + #define REG_IGN_GNTBT4 0x4160 #define RF_MODE 0x00 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index b7a98dbbb09c..22d0dd640ac9 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2442,6 +2442,7 @@ struct rtw_chip_info rtw8822b_hw_spec = { .ptct_efuse_size = 96, .txff_size = 262144, .rxff_size = 24576, + .fw_rxff_size = 12288, .txgi_factor = 1, .is_pwr_by_rate_dec = true, .max_power_index = 0x3f, @@ -2504,6 +2505,8 @@ struct rtw_chip_info rtw8822b_hw_spec = { .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822b), .coex_info_hw_regs = coex_info_hw_regs_8822b, + + .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680}, }; EXPORT_SYMBOL(rtw8822b_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index dd216a23fc99..e37300e98517 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -4294,6 +4294,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .ptct_efuse_size = 124, .txff_size = 262144, .rxff_size = 24576, + .fw_rxff_size = 12288, .txgi_factor = 2, .is_pwr_by_rate_dec = false, .max_power_index = 0x7f, @@ -4364,6 +4365,8 @@ struct rtw_chip_info rtw8822c_hw_spec = { .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822c), .coex_info_hw_regs = coex_info_hw_regs_8822c, + + .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680}, }; EXPORT_SYMBOL(rtw8822c_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h index 41c10e7144df..0c23b5069be0 100644 --- a/drivers/net/wireless/realtek/rtw88/util.h +++ b/drivers/net/wireless/realtek/rtw88/util.h @@ -17,6 +17,8 @@ struct rtw_dev; ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data) #define rtw_iterate_keys(rtwdev, vif, iterator, data) \ ieee80211_iter_keys(rtwdev->hw, vif, iterator, data) +#define rtw_iterate_keys_rcu(rtwdev, vif, iterator, data) \ + ieee80211_iter_keys_rcu((rtwdev)->hw, vif, iterator, data) static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr) { diff --git a/drivers/net/wireless/ti/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h index e03f8321ea60..890176c915ab 100644 --- a/drivers/net/wireless/ti/wl1251/reg.h +++ b/drivers/net/wireless/ti/wl1251/reg.h @@ -217,7 +217,7 @@ enum wl12xx_acx_int_reg { Halt eCPU - 32bit RW ------------------------------------------ 0 HALT_ECPU Halt Embedded CPU - This bit is the - compliment of bit 1 (MDATA2) in the SOR_CFG register. + complement of bit 1 (MDATA2) in the SOR_CFG register. During a hardware reset, this bit holds the inverse of MDATA2. When downloading firmware from the host, diff --git a/drivers/net/wireless/ti/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h index 247f558ba630..8ff018808020 100644 --- a/drivers/net/wireless/ti/wl12xx/reg.h +++ b/drivers/net/wireless/ti/wl12xx/reg.h @@ -139,7 +139,7 @@ Halt eCPU - 32bit RW ------------------------------------------ 0 HALT_ECPU Halt Embedded CPU - This bit is the - compliment of bit 1 (MDATA2) in the SOR_CFG register. + complement of bit 1 (MDATA2) in the SOR_CFG register. During a hardware reset, this bit holds the inverse of MDATA2. 
When downloading firmware from the host,
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 4e7a2140649b..026e88b80bfc 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1433,9 +1433,7 @@ static void wl3501_detach(struct pcmcia_device *link)
 	wl3501_release(link);
 
 	unregister_netdev(dev);
-
-	if (link->priv)
-		free_netdev(link->priv);
+	free_netdev(dev);
 }
 
 static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,