Diffstat (limited to 'drivers/net/wireless/ath/ath11k/wmi.c')
-rw-r--r-- | drivers/net/wireless/ath/ath11k/wmi.c | 856
1 file changed, 814 insertions, 42 deletions
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2751fe8814df..84d1c7054013 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/skbuff.h>
 #include <linux/ctype.h>
@@ -390,6 +391,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
     ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
     ab->target_pdev_count++;
 
+    if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+        !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+        return -EINVAL;
+
     /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
      * band to band for a single radio, need to see how this should be
      * handled.
@@ -397,7 +402,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
     if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
         pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
         pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
-    } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+    }
+
+    if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
         pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
         pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
         pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
@@ -407,8 +414,6 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
             WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
         pdev_cap->nss_ratio_info =
             WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
-    } else {
-        return -EINVAL;
     }
 
     /* tx/rx chainmask reported from fw depends on the actual hw chains used,
@@ -620,10 +625,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
     return skb;
 }
 
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+                                    struct ieee80211_tx_info *info)
+{
+    struct ath11k_base *ab = ar->ab;
+    u32 freq = 0;
+
+    if (ab->hw_params.support_off_channel_tx &&
+        ar->scan.is_roc &&
+        (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+        freq = ar->scan.roc_freq;
+
+    return freq;
+}
+
 int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
                          struct sk_buff *frame)
 {
     struct ath11k_pdev_wmi *wmi = ar->wmi;
+    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
     struct wmi_mgmt_send_cmd *cmd;
     struct wmi_tlv *frame_tlv;
     struct sk_buff *skb;
@@ -644,7 +664,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
                       FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
     cmd->vdev_id = vdev_id;
     cmd->desc_id = buf_id;
-    cmd->chanfreq = 0;
+    cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
     cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
     cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
     cmd->frame_len = frame->len;
@@ -5259,6 +5279,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar)
         break;
     case ATH11K_SCAN_STARTING:
         ar->scan.state = ATH11K_SCAN_RUNNING;
+        if (ar->scan.is_roc)
+            ieee80211_ready_on_channel(ar->hw);
         complete(&ar->scan.started);
         break;
     }
@@ -5341,6 +5363,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
     case ATH11K_SCAN_RUNNING:
     case ATH11K_SCAN_ABORTING:
         ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+        if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+            complete(&ar->scan.on_channel);
         break;
     }
 }
@@ -5789,9 +5813,9 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
                                        arvif->bssid,
                                        NULL);
     if (!sta) {
-        ath11k_warn(ab, "not found station for bssid %pM\n",
-                    arvif->bssid);
-        ret = -EPROTO;
+        ath11k_dbg(ab, ATH11K_DBG_WMI,
+                   "not found station of bssid %pM for rssi chain\n",
+                   arvif->bssid);
         goto exit;
     }
 
@@ -5889,8 +5913,9 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
                        "wmi stats vdev id %d snr %d\n",
                        src->vdev_id, src->beacon_snr);
         } else {
-            ath11k_warn(ab, "not found station for bssid %pM\n",
-                        arvif->bssid);
+            ath11k_dbg(ab, ATH11K_DBG_WMI,
+                       "not found station of bssid %pM for vdev stat\n",
+                       arvif->bssid);
         }
     }
 
@@ -7297,47 +7322,64 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
     rcu_read_unlock();
 }
 
-static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+                                          u16 tag, u16 len,
+                                          const void *ptr, void *data)
 {
-    const void **tb;
     const struct wmi_service_available_event *ev;
-    int ret;
+    u32 *wmi_ext2_service_bitmap;
     int i, j;
 
-    tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
-    if (IS_ERR(tb)) {
-        ret = PTR_ERR(tb);
-        ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
-        return;
-    }
+    switch (tag) {
+    case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+        ev = (struct wmi_service_available_event *)ptr;
+        for (i = 0, j = WMI_MAX_SERVICE;
+             i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+             i++) {
+            do {
+                if (ev->wmi_service_segment_bitmap[i] &
+                    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+                    set_bit(j, ab->wmi_ab.svc_map);
+            } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+        }
 
-    ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
-    if (!ev) {
-        ath11k_warn(ab, "failed to fetch svc available ev");
-        kfree(tb);
-        return;
-    }
+        ath11k_dbg(ab, ATH11K_DBG_WMI,
+                   "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+                   ev->wmi_service_segment_bitmap[0],
+                   ev->wmi_service_segment_bitmap[1],
+                   ev->wmi_service_segment_bitmap[2],
+                   ev->wmi_service_segment_bitmap[3]);
+        break;
+    case WMI_TAG_ARRAY_UINT32:
+        wmi_ext2_service_bitmap = (u32 *)ptr;
+        for (i = 0, j = WMI_MAX_EXT_SERVICE;
+             i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+             i++) {
+            do {
+                if (wmi_ext2_service_bitmap[i] &
+                    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+                    set_bit(j, ab->wmi_ab.svc_map);
+            } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+        }
 
-    /* TODO: Use wmi_service_segment_offset information to get the service
-     * especially when more services are advertised in multiple sevice
-     * available events.
-     */
-    for (i = 0, j = WMI_MAX_SERVICE;
-         i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
-         i++) {
-        do {
-            if (ev->wmi_service_segment_bitmap[i] &
-                BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
-                set_bit(j, ab->wmi_ab.svc_map);
-        } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+        ath11k_dbg(ab, ATH11K_DBG_WMI,
+                   "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+                   wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+                   wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+        break;
     }
+    return 0;
+}
 
-    ath11k_dbg(ab, ATH11K_DBG_WMI,
-               "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
-               ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
-               ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+    int ret;
 
-    kfree(tb);
+    ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+                              ath11k_wmi_tlv_services_parser,
+                              NULL);
+    if (ret)
+        ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
 }
 
 static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -7777,6 +7819,56 @@ exit:
     kfree(tb);
 }
 
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+                                                struct sk_buff *skb)
+{
+    const void **tb;
+    const struct wmi_gtk_offload_status_event *ev;
+    struct ath11k_vif *arvif;
+    __be64 replay_ctr_be;
+    u64 replay_ctr;
+    int ret;
+
+    tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+    if (IS_ERR(tb)) {
+        ret = PTR_ERR(tb);
+        ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+        return;
+    }
+
+    ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+    if (!ev) {
+        ath11k_warn(ab, "failed to fetch gtk offload status ev");
+        kfree(tb);
+        return;
+    }
+
+    arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+    if (!arvif) {
+        ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+                    ev->vdev_id);
+        kfree(tb);
+        return;
+    }
+
+    ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
+               ev->refresh_cnt);
+    ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+                    NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
+    replay_ctr = ev->replay_ctr.word1;
+    replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+    arvif->rekey_data.replay_ctr = replay_ctr;
+
+    /* supplicant expects big-endian replay counter */
+    replay_ctr_be = cpu_to_be64(replay_ctr);
+
+    ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+                               (void *)&replay_ctr_be, GFP_ATOMIC);
+
+    kfree(tb);
+}
+
 static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
 {
     struct wmi_cmd_hdr *cmd_hdr;
@@ -7908,6 +8000,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
     case WMI_DIAG_EVENTID:
         ath11k_wmi_diag_event(ab, skb);
         break;
+    case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+        ath11k_wmi_gtk_offload_status_event(ab, skb);
+        break;
     /* TODO: Add remaining events */
     default:
         ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -8155,7 +8250,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
     ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
 
     /* It's overwritten when service_ext_ready is handled */
-    if (ab->hw_params.single_pdev_only)
+    if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
         ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
 
     /* TODO: Init remaining wmi soc resources required */
@@ -8177,6 +8272,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
     ath11k_wmi_free_dbring_caps(ab);
 }
 
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+                                  u32 filter_bitmap, bool enable)
+{
+    struct wmi_hw_data_filter_cmd *cmd;
+    struct sk_buff *skb;
+    int len;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = vdev_id;
+    cmd->enable = enable;
+
+    /* Set all modes in case of disable */
+    if (cmd->enable)
+        cmd->hw_filter_bitmap = filter_bitmap;
+    else
+        cmd->hw_filter_bitmap = ((u32)~0U);
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+               "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+               enable, filter_bitmap);
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
 int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
 {
     struct wmi_wow_host_wakeup_ind *cmd;
@@ -8247,3 +8375,647 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
 
     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
 }
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+                                    enum wmi_wow_wakeup_event event,
+                                    u32 enable)
+{
+    struct wmi_wow_add_del_event_cmd *cmd;
+    struct sk_buff *skb;
+    size_t len;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = vdev_id;
+    cmd->is_add = enable;
+    cmd->event_bitmap = (1 << event);
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+               wow_wakeup_event(event), enable, vdev_id);
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+                               const u8 *pattern, const u8 *mask,
+                               int pattern_len, int pattern_offset)
+{
+    struct wmi_wow_add_pattern_cmd *cmd;
+    struct wmi_wow_bitmap_pattern *bitmap;
+    struct wmi_tlv *tlv;
+    struct sk_buff *skb;
+    u8 *ptr;
+    size_t len;
+
+    len = sizeof(*cmd) +
+          sizeof(*tlv) +                    /* array struct */
+          sizeof(*bitmap) +                 /* bitmap */
+          sizeof(*tlv) +                    /* empty ipv4 sync */
+          sizeof(*tlv) +                    /* empty ipv6 sync */
+          sizeof(*tlv) +                    /* empty magic */
+          sizeof(*tlv) +                    /* empty info timeout */
+          sizeof(*tlv) + sizeof(u32);       /* ratelimit interval */
+
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    /* cmd */
+    ptr = (u8 *)skb->data;
+    cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                 WMI_TAG_WOW_ADD_PATTERN_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = vdev_id;
+    cmd->pattern_id = pattern_id;
+    cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+    ptr += sizeof(*cmd);
+
+    /* bitmap */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+    ptr += sizeof(*tlv);
+
+    bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+    bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                    WMI_TAG_WOW_BITMAP_PATTERN_T) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+    memcpy(bitmap->patternbuf, pattern, pattern_len);
+    ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+    memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+    ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+    bitmap->pattern_offset = pattern_offset;
+    bitmap->pattern_len = pattern_len;
+    bitmap->bitmask_len = pattern_len;
+    bitmap->pattern_id = pattern_id;
+
+    ptr += sizeof(*bitmap);
+
+    /* ipv4 sync */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, 0);
+
+    ptr += sizeof(*tlv);
+
+    /* ipv6 sync */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, 0);
+
+    ptr += sizeof(*tlv);
+
+    /* magic */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, 0);
+
+    ptr += sizeof(*tlv);
+
+    /* pattern info timeout */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_UINT32) |
+                  FIELD_PREP(WMI_TLV_LEN, 0);
+
+    ptr += sizeof(*tlv);
+
+    /* ratelimit interval */
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_UINT32) |
+                  FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+               vdev_id, pattern_id, pattern_offset);
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+    struct wmi_wow_del_pattern_cmd *cmd;
+    struct sk_buff *skb;
+    size_t len;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                 WMI_TAG_WOW_DEL_PATTERN_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = vdev_id;
+    cmd->pattern_id = pattern_id;
+    cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+               vdev_id, pattern_id);
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+                                   u32 vdev_id,
+                                   struct wmi_pno_scan_req *pno)
+{
+    struct nlo_configured_parameters *nlo_list;
+    struct wmi_wow_nlo_config_cmd *cmd;
+    struct wmi_tlv *tlv;
+    struct sk_buff *skb;
+    u32 *channel_list;
+    size_t len, nlo_list_len, channel_list_len;
+    u8 *ptr;
+    u32 i;
+
+    len = sizeof(*cmd) +
+          sizeof(*tlv) +
+          /* TLV place holder for array of structures
+           * nlo_configured_parameters(nlo_list)
+           */
+          sizeof(*tlv);
+          /* TLV place holder for array of uint32 channel_list */
+
+    channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+    len += channel_list_len;
+
+    nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+    len += nlo_list_len;
+
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return ERR_PTR(-ENOMEM);
+
+    ptr = (u8 *)skb->data;
+    cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = pno->vdev_id;
+    cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+    /* current FW does not support min-max range for dwell time */
+    cmd->active_dwell_time = pno->active_max_time;
+    cmd->passive_dwell_time = pno->passive_max_time;
+
+    if (pno->do_passive_scan)
+        cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+    cmd->fast_scan_period = pno->fast_scan_period;
+    cmd->slow_scan_period = pno->slow_scan_period;
+    cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+    cmd->delay_start_time = pno->delay_start_time;
+
+    if (pno->enable_pno_scan_randomization) {
+        cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+                      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+        ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+        ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+        ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+        ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+    }
+
+    ptr += sizeof(*cmd);
+
+    /* nlo_configured_parameters(nlo_list) */
+    cmd->no_of_ssids = pno->uc_networks_count;
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG,
+                             WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+    ptr += sizeof(*tlv);
+    nlo_list = (struct nlo_configured_parameters *)ptr;
+    for (i = 0; i < cmd->no_of_ssids; i++) {
+        tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+        tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+        nlo_list[i].ssid.valid = true;
+        nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+        memcpy(nlo_list[i].ssid.ssid.ssid,
+               pno->a_networks[i].ssid.ssid,
+               nlo_list[i].ssid.ssid.ssid_len);
+        ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+                            roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+        if (pno->a_networks[i].rssi_threshold &&
+            pno->a_networks[i].rssi_threshold > -300) {
+            nlo_list[i].rssi_cond.valid = true;
+            nlo_list[i].rssi_cond.rssi =
+                pno->a_networks[i].rssi_threshold;
+        }
+
+        nlo_list[i].bcast_nw_type.valid = true;
+        nlo_list[i].bcast_nw_type.bcast_nw_type =
+            pno->a_networks[i].bcast_nw_type;
+    }
+
+    ptr += nlo_list_len;
+    cmd->num_of_channels = pno->a_networks[0].channel_count;
+    tlv = (struct wmi_tlv *)ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+                  FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+    ptr += sizeof(*tlv);
+    channel_list = (u32 *)ptr;
+    for (i = 0; i < cmd->num_of_channels; i++)
+        channel_list[i] = pno->a_networks[0].channels[i];
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+               vdev_id);
+
+    return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+                                                         u32 vdev_id)
+{
+    struct wmi_wow_nlo_config_cmd *cmd;
+    struct sk_buff *skb;
+    size_t len;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return ERR_PTR(-ENOMEM);
+
+    cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+    cmd->vdev_id = vdev_id;
+    cmd->flags = WMI_NLO_CONFIG_STOP;
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+               "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+    return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+                              struct wmi_pno_scan_req *pno_scan)
+{
+    struct sk_buff *skb;
+
+    if (pno_scan->enable)
+        skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+    else
+        skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+    if (IS_ERR_OR_NULL(skb))
+        return -ENOMEM;
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+                                       struct ath11k_arp_ns_offload *offload,
+                                       u8 **ptr,
+                                       bool enable,
+                                       bool ext)
+{
+    struct wmi_ns_offload_tuple *ns;
+    struct wmi_tlv *tlv;
+    u8 *buf_ptr = *ptr;
+    u32 ns_cnt, ns_ext_tuples;
+    int i, max_offloads;
+
+    ns_cnt = offload->ipv6_count;
+
+    tlv = (struct wmi_tlv *)buf_ptr;
+
+    if (ext) {
+        ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+        tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+                      FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+        i = WMI_MAX_NS_OFFLOADS;
+        max_offloads = offload->ipv6_count;
+    } else {
+        tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+                      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+        i = 0;
+        max_offloads = WMI_MAX_NS_OFFLOADS;
+    }
+
+    buf_ptr += sizeof(*tlv);
+
+    for (; i < max_offloads; i++) {
+        ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+        ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+        if (enable) {
+            if (i < ns_cnt)
+                ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+            memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+            memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+            ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+            ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+            if (offload->ipv6_type[i])
+                ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+            memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+            ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+            if (ns->target_mac.word0 != 0 ||
+                ns->target_mac.word1 != 0) {
+                ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+            }
+
+            ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                       "wmi index %d ns_solicited %pI6 target %pI6",
+                       i, ns->solicitation_ipaddr,
+                       ns->target_ipaddr[0]);
+        }
+
+        buf_ptr += sizeof(*ns);
+    }
+
+    *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+                                        struct ath11k_arp_ns_offload *offload,
+                                        u8 **ptr,
+                                        bool enable)
+{
+    struct wmi_arp_offload_tuple *arp;
+    struct wmi_tlv *tlv;
+    u8 *buf_ptr = *ptr;
+    int i;
+
+    /* fill arp tuple */
+    tlv = (struct wmi_tlv *)buf_ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+                  FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+    buf_ptr += sizeof(*tlv);
+
+    for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+        arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+        arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+                          FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+        if (enable && i < offload->ipv4_count) {
+            /* Copy the target ip addr and flags */
+            arp->flags = WMI_ARPOL_FLAGS_VALID;
+            memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+            ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+            ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
+                       arp->target_ipaddr);
+        }
+
+        buf_ptr += sizeof(*arp);
+    }
+
+    *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+                              struct ath11k_vif *arvif, bool enable)
+{
+    struct ath11k_arp_ns_offload *offload;
+    struct wmi_set_arp_ns_offload_cmd *cmd;
+    struct wmi_tlv *tlv;
+    struct sk_buff *skb;
+    u8 *buf_ptr;
+    size_t len;
+    u8 ns_cnt, ns_ext_tuples = 0;
+
+    offload = &arvif->arp_ns_offload;
+    ns_cnt = offload->ipv6_count;
+
+    len = sizeof(*cmd) +
+          sizeof(*tlv) +
+          WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+          sizeof(*tlv) +
+          WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+    if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+        ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+        len += sizeof(*tlv) +
+               ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+    }
+
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    buf_ptr = skb->data;
+    cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                 WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->flags = 0;
+    cmd->vdev_id = arvif->vdev_id;
+    cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+    buf_ptr += sizeof(*cmd);
+
+    ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
+    ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+    if (ns_ext_tuples)
+        ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
+
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+                                 struct ath11k_vif *arvif, bool enable)
+{
+    struct wmi_gtk_rekey_offload_cmd *cmd;
+    struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+    int len;
+    struct sk_buff *skb;
+    __le64 replay_ctr;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = arvif->vdev_id;
+
+    if (enable) {
+        cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+        /* the length in rekey_data and cmd is equal */
+        memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+        ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
+        memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+        ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
+        replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+        memcpy(cmd->replay_ctr, &replay_ctr,
+               sizeof(replay_ctr));
+        ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+    } else {
+        cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+    }
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+               arvif->vdev_id, enable);
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+                                 struct ath11k_vif *arvif)
+{
+    struct wmi_gtk_rekey_offload_cmd *cmd;
+    int len;
+    struct sk_buff *skb;
+
+    len = sizeof(*cmd);
+    skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+    cmd->vdev_id = arvif->vdev_id;
+    cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+               arvif->vdev_id);
+    return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{    struct ath11k_pdev_wmi *wmi = ar->wmi;
+    struct wmi_pdev_set_sar_table_cmd *cmd;
+    struct wmi_tlv *tlv;
+    struct sk_buff *skb;
+    u8 *buf_ptr;
+    u32 len, sar_len_aligned, rsvd_len_aligned;
+
+    sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+    rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+    len = sizeof(*cmd) +
+          TLV_HDR_SIZE + sar_len_aligned +
+          TLV_HDR_SIZE + rsvd_len_aligned;
+
+    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+    cmd->pdev_id = ar->pdev->pdev_id;
+    cmd->sar_len = BIOS_SAR_TABLE_LEN;
+    cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+    buf_ptr = skb->data + sizeof(*cmd);
+    tlv = (struct wmi_tlv *)buf_ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+                  FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+    buf_ptr += TLV_HDR_SIZE;
+    memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+    buf_ptr += sar_len_aligned;
+    tlv = (struct wmi_tlv *)buf_ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+                  FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+    return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+    struct ath11k_pdev_wmi *wmi = ar->wmi;
+    struct wmi_pdev_set_geo_table_cmd *cmd;
+    struct wmi_tlv *tlv;
+    struct sk_buff *skb;
+    u8 *buf_ptr;
+    u32 len, rsvd_len_aligned;
+
+    rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+    len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+    cmd->pdev_id = ar->pdev->pdev_id;
+    cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+    buf_ptr = skb->data + sizeof(*cmd);
+    tlv = (struct wmi_tlv *)buf_ptr;
+    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+                  FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+    return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+                             const struct wmi_sta_keepalive_arg *arg)
+{
+    struct ath11k_pdev_wmi *wmi = ar->wmi;
+    struct wmi_sta_keepalive_cmd *cmd;
+    struct wmi_sta_keepalive_arp_resp *arp;
+    struct sk_buff *skb;
+    size_t len;
+
+    len = sizeof(*cmd) + sizeof(*arp);
+    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+    if (!skb)
+        return -ENOMEM;
+
+    cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                 WMI_TAG_STA_KEEPALIVE_CMD) |
+                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+    cmd->vdev_id = arg->vdev_id;
+    cmd->enabled = arg->enabled;
+    cmd->interval = arg->interval;
+    cmd->method = arg->method;
+
+    if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+        arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+        arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+        arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                     WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) |
+                          FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+        arp->src_ip4_addr = arg->src_ip4_addr;
+        arp->dest_ip4_addr = arg->dest_ip4_addr;
+        ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+    }
+
+    ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+               "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+               arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+    return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
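The two loops in ath11k_wmi_tlv_services_parser() above walk a packed service bitmap: word index i steps through the 32-bit words of the event, while j counts the absolute service ID, starting at WMI_MAX_SERVICE for the ext bitmap and at WMI_MAX_EXT_SERVICE for the ext2 bitmap. A minimal standalone sketch of that index math follows; the constant values are illustrative placeholders, not the driver's real definitions.

```c
/* Standalone illustration of the bit-walk used by
 * ath11k_wmi_tlv_services_parser(); the constants below are
 * illustrative placeholders, not the driver's real values.
 */
#include <stdio.h>
#include <stdint.h>

#define SERVICE_BITS_PER_WORD  32   /* cf. WMI_AVAIL_SERVICE_BITS_IN_SIZE32 */
#define SERVICE_SEGMENT_WORDS  4    /* cf. WMI_SERVICE_SEGMENT_BM_SIZE32 */
#define SERVICE_ID_BASE        128  /* e.g. where the ext bitmap starts */
#define SERVICE_ID_LIMIT       256  /* e.g. where the ext bitmap ends */

static void walk_bitmap(const uint32_t *bm)
{
    int i, j;

    for (i = 0, j = SERVICE_ID_BASE;
         i < SERVICE_SEGMENT_WORDS && j < SERVICE_ID_LIMIT;
         i++) {
        do {
            /* bit (j % 32) of word i corresponds to service ID j */
            if (bm[i] & (1u << (j % SERVICE_BITS_PER_WORD)))
                printf("service %d advertised\n", j);
        } while (++j % SERVICE_BITS_PER_WORD);
    }
}

int main(void)
{
    /* word 0 advertises BASE+0 and BASE+5, word 1 advertises BASE+33 */
    uint32_t bitmap[SERVICE_SEGMENT_WORDS] = { 0x21, 0x2, 0, 0 };

    walk_bitmap(bitmap);
    return 0;
}
```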
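ath11k_wmi_gtk_offload_status_event() above rebuilds a 64-bit replay counter from two 32-bit words of the event (word1 is the high half) and then hands the supplicant a big-endian copy. A small userspace sketch of just that conversion, serializing the counter to big-endian bytes by hand rather than with the kernel's cpu_to_be64():

```c
/* Standalone sketch of the replay-counter handling done by
 * ath11k_wmi_gtk_offload_status_event(): combine word1/word0 into a
 * 64-bit value, then emit it as big-endian bytes for the supplicant.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t word0 = 0x00000005;  /* low 32 bits from the event (example value) */
    uint32_t word1 = 0x00000001;  /* high 32 bits from the event (example value) */
    uint64_t replay_ctr;
    unsigned char be[8];
    int i;

    replay_ctr = word1;
    replay_ctr = (replay_ctr << 32) | word0;

    /* supplicant expects a big-endian replay counter: most significant byte first */
    for (i = 0; i < 8; i++)
        be[i] = (unsigned char)(replay_ctr >> (56 - 8 * i));

    printf("host order: 0x%016llx\n", (unsigned long long)replay_ctr);
    printf("big-endian bytes:");
    for (i = 0; i < 8; i++)
        printf(" %02x", be[i]);
    printf("\n");

    return 0;
}
```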
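Nearly every command added by this patch builds its TLV header the same way: FIELD_PREP() packs the tag and the payload length (excluding the header) into one 32-bit word. The sketch below mimics that packing in userspace; the mask values are illustrative placeholders and do not claim to match the driver's actual WMI_TLV_TAG/WMI_TLV_LEN bit layout.

```c
/* Standalone sketch of FIELD_PREP()-style TLV header packing as used
 * throughout this patch.  Mask layout here is a placeholder, not the
 * real WMI_TLV_TAG/WMI_TLV_LEN definition.
 */
#include <stdio.h>
#include <stdint.h>

#define TLV_TAG_MASK 0x0000ffffu  /* placeholder: tag in the low 16 bits */
#define TLV_LEN_MASK 0xffff0000u  /* placeholder: length in the high 16 bits */

/* minimal FIELD_PREP(): shift the value into the mask's position */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
    return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
    uint32_t tag = 0x1b;  /* hypothetical command tag */
    uint32_t len = 24;    /* payload length excluding the TLV header */
    uint32_t header = field_prep(TLV_TAG_MASK, tag) |
                      field_prep(TLV_LEN_MASK, len);

    printf("tlv_header = 0x%08x (tag 0x%x, len %u)\n",
           header,
           (header & TLV_TAG_MASK) >> __builtin_ctz(TLV_TAG_MASK),
           (header & TLV_LEN_MASK) >> __builtin_ctz(TLV_LEN_MASK));
    return 0;
}
```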