Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi-tlv.c')
-rw-r--r-- | drivers/net/wireless/ath/ath10k/wmi-tlv.c | 582
1 file changed, 567 insertions(+), 15 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ee0c5f602e29..8fdba3865c96 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -16,10 +16,13 @@
  */
 #include "core.h"
 #include "debug.h"
+#include "mac.h"
 #include "hw.h"
+#include "mac.h"
 #include "wmi.h"
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
+#include "p2p.h"
 
 /***************/
 /* TLV helpers */
@@ -31,9 +34,9 @@ struct wmi_tlv_policy {
 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
 	[WMI_TLV_TAG_ARRAY_BYTE]
-		= { .min_len = sizeof(u8) },
+		= { .min_len = 0 },
 	[WMI_TLV_TAG_ARRAY_UINT32]
-		= { .min_len = sizeof(u32) },
+		= { .min_len = 0 },
 	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
 		= { .min_len = sizeof(struct wmi_scan_event) },
 	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
 		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
 	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
 		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
 };
 
 static int
@@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 {
 	const void **tb;
 	const struct wmi_tlv_bcn_tx_status_ev *ev;
+	struct ath10k_vif *arvif;
 	u32 vdev_id, tx_status;
 	int ret;
 
@@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 		break;
 	}
 
+	arvif = ath10k_get_arvif(ar, vdev_id);
+	if (arvif && arvif->is_up && arvif->vif->csa_active)
+		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
 	kfree(tb);
 	return 0;
 }
@@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
 	return 0;
 }
 
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+					struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_p2p_noa_ev *ev;
+	const struct wmi_p2p_noa_info *noa;
+	int ret, vdev_id;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+	if (!ev || !noa) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+		   vdev_id, noa->num_descriptors);
+
+	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+	kfree(tb);
+	return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+					 struct sk_buff *skb)
+{
+	const void **tb;
+	const struct wmi_tlv_tx_pause_ev *ev;
+	int ret, vdev_id;
+	u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	pause_id = __le32_to_cpu(ev->pause_id);
+	action = __le32_to_cpu(ev->action);
+	vdev_map = __le32_to_cpu(ev->vdev_map);
+	peer_id = __le32_to_cpu(ev->peer_id);
+	tid_map = __le32_to_cpu(ev->tid_map);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+		   pause_id, action, vdev_map, peer_id, tid_map);
+
+	for (vdev_id = 0; vdev_map; vdev_id++) {
+		if (!(vdev_map & BIT(vdev_id)))
+			continue;
+
+		vdev_map &= ~BIT(vdev_id);
+		ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+	}
+
+	kfree(tb);
+	return 0;
+}
+
 /***********/
 /* TLV ops */
 /***********/
@@ -309,7 +402,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
-		return;
+		goto out;
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
@@ -417,11 +510,18 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
 	case WMI_TLV_DIAG_EVENTID:
 		ath10k_wmi_tlv_event_diag(ar, skb);
 		break;
+	case WMI_TLV_P2P_NOA_EVENTID:
+		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+		break;
+	case WMI_TLV_TX_PAUSE_EVENTID:
+		ath10k_wmi_tlv_event_tx_pause(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
 	}
 
+out:
 	dev_kfree_skb(skb);
 }
 
@@ -1012,6 +1112,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
 	return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+					  struct sk_buff *skb,
+					  struct wmi_roam_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_roam_ev *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = ev->vdev_id;
+	arg->reason = ev->reason;
+	arg->rssi = ev->rssi;
+
+	kfree(tb);
+	return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_wow_ev_arg *arg)
+{
+	const void **tb;
+	const struct wmi_tlv_wow_event_info *ev;
+	int ret;
+
+	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+		return ret;
+	}
+
+	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+	if (!ev) {
+		kfree(tb);
+		return -EPROTO;
+	}
+
+	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+	arg->flag = __le32_to_cpu(ev->flag);
+	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+	arg->data_len = __le32_to_cpu(ev->data_len);
+
+	kfree(tb);
+	return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -1160,8 +1319,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
 
 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
-		cfg->num_offload_peers = __cpu_to_le32(3);
-		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	} else {
 		cfg->num_offload_peers = __cpu_to_le32(0);
 		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1337,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
 	cfg->rx_decap_mode = __cpu_to_le32(1);
 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
-	cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
-	cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
 	cfg->num_mcast_groups = __cpu_to_le32(0);
 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1352,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
 	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
 	cfg->max_frag_entries = __cpu_to_le32(2);
-	cfg->num_tdls_vdevs = __cpu_to_le32(1);
+	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
 	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
 	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
 	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
-	cfg->num_wow_filters = __cpu_to_le32(0x16);
+	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
 	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1407,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
 	cmd = (void *)tlv->value;
 
 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-	cmd->burst_duration_ms = __cpu_to_le32(0);
+	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
 	cmd->num_channels = __cpu_to_le32(arg->n_channels);
 	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
 	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1567,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
 	void *ptr;
 	u32 flags = 0;
 
-	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
 		return ERR_PTR(-EINVAL);
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1939,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
 
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-				  const u8 peer_addr[ETH_ALEN])
+				  const u8 peer_addr[ETH_ALEN],
+				  enum wmi_peer_type peer_type)
 {
 	struct wmi_tlv_peer_create_cmd *cmd;
 	struct wmi_tlv *tlv;
@@ -1797,7 +1955,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
 	tlv->len = __cpu_to_le16(sizeof(*cmd));
 	cmd = (void *)tlv->value;
 	cmd->vdev_id = __cpu_to_le32(vdev_id);
-	cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+	cmd->peer_type = __cpu_to_le32(peer_type);
 	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2185,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
 	if (!mac)
 		return ERR_PTR(-EINVAL);
 
-	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
@@ -2485,6 +2643,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
 	return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_tdls_state state)
+{
+	struct wmi_tdls_set_state_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+	/* Set to options from wmi_tlv_tdls_options,
+	 * for now none of them are enabled.
+	 */
+	u32 options = 0;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->state = __cpu_to_le32(state);
+	cmd->notification_interval_ms = __cpu_to_le32(5000);
+	cmd->tx_discovery_threshold = __cpu_to_le32(100);
+	cmd->tx_teardown_threshold = __cpu_to_le32(5);
+	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+	cmd->rssi_delta = __cpu_to_le32(-20);
+	cmd->tdls_options = __cpu_to_le32(options);
+	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+		   state, vdev_id);
+	return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+	u32 peer_qos = 0;
+
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+	return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+				       const struct wmi_tdls_peer_update_cmd_arg *arg,
+				       const struct wmi_tdls_peer_capab_arg *cap,
+				       const struct wmi_channel_arg *chan_arg)
+{
+	struct wmi_tdls_peer_update_cmd *cmd;
+	struct wmi_tdls_peer_capab *peer_cap;
+	struct wmi_channel *chan;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	u32 peer_qos;
+	void *ptr;
+	int len;
+	int i;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) + sizeof(*peer_cap) +
+	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+	cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+	peer_cap = (void *)tlv->value;
+	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+						   cap->peer_max_sp);
+	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*peer_cap);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+	ptr += sizeof(*tlv);
+
+	for (i = 0; i < cap->peer_chan_len; i++) {
+		tlv = ptr;
+		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+		tlv->len = __cpu_to_le16(sizeof(*chan));
+		chan = (void *)tlv->value;
+		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+		ptr += sizeof(*tlv);
+		ptr += sizeof(*chan);
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_enable_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->enable = __cpu_to_le32(1);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+					   u32 vdev_id,
+					   enum wmi_wow_wakeup_event event,
+					   u32 enable)
+{
+	struct wmi_tlv_wow_add_del_event_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->is_add = __cpu_to_le32(enable);
+	cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+		   wow_wakeup_event(event), enable, vdev_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+	struct wmi_tlv_wow_host_wakeup_ind *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id, const u8 *pattern,
+				      const u8 *bitmask, int pattern_len,
+				      int pattern_offset)
+{
+	struct wmi_tlv_wow_add_pattern_cmd *cmd;
+	struct wmi_tlv_wow_bitmap_pattern *bitmap;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd) +
+	      sizeof(*tlv) +			/* array struct */
+	      sizeof(*tlv) + sizeof(*bitmap) +	/* bitmap */
+	      sizeof(*tlv) +			/* empty ipv4 sync */
+	      sizeof(*tlv) +			/* empty ipv6 sync */
+	      sizeof(*tlv) +			/* empty magic */
+	      sizeof(*tlv) +			/* empty info timeout */
+	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
+
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	/* cmd */
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	/* bitmap */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+	ptr += sizeof(*tlv);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+	tlv->len = __cpu_to_le16(sizeof(*bitmap));
+	bitmap = (void *)tlv->value;
+
+	memcpy(bitmap->patternbuf, pattern, pattern_len);
+	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+	bitmap->pattern_len = __cpu_to_le32(pattern_len);
+	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+	bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*bitmap);
+
+	/* ipv4 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ipv6 sync */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* magic */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* pattern info timeout */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(0);
+
+	ptr += sizeof(*tlv);
+
+	/* ratelimit interval */
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+	tlv->len = __cpu_to_le16(sizeof(u32));
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+		   vdev_id, pattern_id, pattern_offset);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+				      u32 pattern_id)
+{
+	struct wmi_tlv_wow_del_pattern_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->pattern_id = __cpu_to_le32(pattern_id);
+	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+		   vdev_id, pattern_id);
+	return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+	struct wmi_tlv_adaptive_qcs *cmd;
+	struct wmi_tlv *tlv;
+	struct sk_buff *skb;
+	void *ptr;
+	size_t len;
+
+	len = sizeof(*tlv) + sizeof(*cmd);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+	return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -2609,6 +3148,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
 	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
 	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3278,8 @@ static const struct wmi_ops wmi_tlv_ops = {
 	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
 
 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3325,14 @@ static const struct wmi_ops wmi_tlv_ops = {
 	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
 	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
 	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
 };
 
 /************/