0001 // SPDX-License-Identifier: BSD-3-Clause-Clear
0002 /*
0003  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
0004  * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
0005  */
0006 #include <linux/skbuff.h>
0007 #include <linux/ctype.h>
0008 #include <net/mac80211.h>
0009 #include <net/cfg80211.h>
0010 #include <linux/completion.h>
0011 #include <linux/if_ether.h>
0012 #include <linux/types.h>
0013 #include <linux/pci.h>
0014 #include <linux/uuid.h>
0015 #include <linux/time.h>
0016 #include <linux/of.h>
0017 #include "core.h"
0018 #include "debug.h"
0019 #include "mac.h"
0020 #include "hw.h"
0021 #include "peer.h"
0022 
0023 struct wmi_tlv_policy {
0024     size_t min_len;
0025 };
0026 
0027 struct wmi_tlv_svc_ready_parse {
0028     bool wmi_svc_bitmap_done;
0029 };
0030 
0031 struct wmi_tlv_dma_ring_caps_parse {
0032     struct wmi_dma_ring_capabilities *dma_ring_caps;
0033     u32 n_dma_ring_caps;
0034 };
0035 
0036 struct wmi_tlv_svc_rdy_ext_parse {
0037     struct ath11k_service_ext_param param;
0038     struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
0039     struct wmi_hw_mode_capabilities *hw_mode_caps;
0040     u32 n_hw_mode_caps;
0041     u32 tot_phy_id;
0042     struct wmi_hw_mode_capabilities pref_hw_mode_caps;
0043     struct wmi_mac_phy_capabilities *mac_phy_caps;
0044     u32 n_mac_phy_caps;
0045     struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
0046     struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
0047     u32 n_ext_hal_reg_caps;
0048     struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
0049     bool hw_mode_done;
0050     bool mac_phy_done;
0051     bool ext_hal_reg_done;
0052     bool mac_phy_chainmask_combo_done;
0053     bool mac_phy_chainmask_cap_done;
0054     bool oem_dma_ring_cap_done;
0055     bool dma_ring_cap_done;
0056 };
0057 
0058 struct wmi_tlv_svc_rdy_ext2_parse {
0059     struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
0060     bool dma_ring_cap_done;
0061 };
0062 
0063 struct wmi_tlv_rdy_parse {
0064     u32 num_extra_mac_addr;
0065 };
0066 
0067 struct wmi_tlv_dma_buf_release_parse {
0068     struct ath11k_wmi_dma_buf_release_fixed_param fixed;
0069     struct wmi_dma_buf_release_entry *buf_entry;
0070     struct wmi_dma_buf_release_meta_data *meta_data;
0071     u32 num_buf_entry;
0072     u32 num_meta;
0073     bool buf_entry_done;
0074     bool meta_data_done;
0075 };
0076 
0077 struct wmi_tlv_fw_stats_parse {
0078     const struct wmi_stats_event *ev;
0079     const struct wmi_per_chain_rssi_stats *rssi;
0080     struct ath11k_fw_stats *stats;
0081     int rssi_num;
0082     bool chain_rssi_done;
0083 };
0084 
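/* Minimum payload lengths the host expects for selected TLV tags. The TLV
 * iterator below checks each received TLV against this table before calling
 * any handler, so handlers can dereference the fixed part of these
 * structures without repeating the length check. Tags without an entry (or
 * with min_len 0) are passed through unchecked.
 */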
0085 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
0086     [WMI_TAG_ARRAY_BYTE]
0087         = { .min_len = 0 },
0088     [WMI_TAG_ARRAY_UINT32]
0089         = { .min_len = 0 },
0090     [WMI_TAG_SERVICE_READY_EVENT]
0091         = { .min_len = sizeof(struct wmi_service_ready_event) },
0092     [WMI_TAG_SERVICE_READY_EXT_EVENT]
0093         = { .min_len =  sizeof(struct wmi_service_ready_ext_event) },
0094     [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
0095         = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
0096     [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
0097         = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
0098     [WMI_TAG_VDEV_START_RESPONSE_EVENT]
0099         = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
0100     [WMI_TAG_PEER_DELETE_RESP_EVENT]
0101         = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
0102     [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
0103         = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
0104     [WMI_TAG_VDEV_STOPPED_EVENT]
0105         = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
0106     [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
0107         = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
0108     [WMI_TAG_MGMT_RX_HDR]
0109         = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
0110     [WMI_TAG_MGMT_TX_COMPL_EVENT]
0111         = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
0112     [WMI_TAG_SCAN_EVENT]
0113         = { .min_len = sizeof(struct wmi_scan_event) },
0114     [WMI_TAG_PEER_STA_KICKOUT_EVENT]
0115         = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
0116     [WMI_TAG_ROAM_EVENT]
0117         = { .min_len = sizeof(struct wmi_roam_event) },
0118     [WMI_TAG_CHAN_INFO_EVENT]
0119         = { .min_len = sizeof(struct wmi_chan_info_event) },
0120     [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
0121         = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
0122     [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
0123         = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
0124     [WMI_TAG_READY_EVENT] = {
0125         .min_len = sizeof(struct wmi_ready_event_min) },
0126     [WMI_TAG_SERVICE_AVAILABLE_EVENT]
0127         = {.min_len = sizeof(struct wmi_service_available_event) },
0128     [WMI_TAG_PEER_ASSOC_CONF_EVENT]
0129         = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
0130     [WMI_TAG_STATS_EVENT]
0131         = { .min_len = sizeof(struct wmi_stats_event) },
0132     [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
0133         = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
0134     [WMI_TAG_HOST_SWFDA_EVENT] = {
0135         .min_len = sizeof(struct wmi_fils_discovery_event) },
0136     [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
0137         .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
0138     [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
0139         .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
0140     [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
0141         .min_len = sizeof(struct wmi_obss_color_collision_event) },
0142     [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
0143         .min_len = sizeof(struct wmi_11d_new_cc_ev) },
0144     [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
0145         .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
0146     [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
0147         .min_len = sizeof(struct wmi_twt_add_dialog_event) },
0148 };
0149 
0150 #define PRIMAP(_hw_mode_) \
0151     [_hw_mode_] = _hw_mode_##_PRI
0152 
0153 static const int ath11k_hw_mode_pri_map[] = {
0154     PRIMAP(WMI_HOST_HW_MODE_SINGLE),
0155     PRIMAP(WMI_HOST_HW_MODE_DBS),
0156     PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
0157     PRIMAP(WMI_HOST_HW_MODE_SBS),
0158     PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
0159     PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
0160     /* keep last */
0161     PRIMAP(WMI_HOST_HW_MODE_MAX),
0162 };
0163 
0164 static int
0165 ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
0166             int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
0167                 const void *ptr, void *data),
0168             void *data)
0169 {
0170     const void *begin = ptr;
0171     const struct wmi_tlv *tlv;
0172     u16 tlv_tag, tlv_len;
0173     int ret;
0174 
0175     while (len > 0) {
0176         if (len < sizeof(*tlv)) {
0177             ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
0178                    ptr - begin, len, sizeof(*tlv));
0179             return -EINVAL;
0180         }
0181 
0182         tlv = ptr;
0183         tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
0184         tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
0185         ptr += sizeof(*tlv);
0186         len -= sizeof(*tlv);
0187 
0188         if (tlv_len > len) {
0189             ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
0190                    tlv_tag, ptr - begin, len, tlv_len);
0191             return -EINVAL;
0192         }
0193 
0194         if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
0195             wmi_tlv_policies[tlv_tag].min_len &&
0196             wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
0197             ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
0198                    tlv_tag, ptr - begin, tlv_len,
0199                    wmi_tlv_policies[tlv_tag].min_len);
0200             return -EINVAL;
0201         }
0202 
0203         ret = iter(ab, tlv_tag, tlv_len, ptr, data);
0204         if (ret)
0205             return ret;
0206 
0207         ptr += tlv_len;
0208         len -= tlv_len;
0209     }
0210 
0211     return 0;
0212 }
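/* Example (an illustrative sketch, not code from this driver): a minimal
 * callback for ath11k_wmi_tlv_iter() that records where one tag's payload
 * starts. When the callback runs, the iterator has already validated the TLV
 * header and the min_len policy for the tag, and 'ptr' points at the payload
 * just past the TLV header:
 *
 *	static int find_ready_tlv(struct ath11k_base *ab, u16 tag, u16 len,
 *				  const void *ptr, void *data)
 *	{
 *		const struct wmi_ready_event_min **ready = data;
 *
 *		if (tag == WMI_TAG_READY_EVENT)
 *			*ready = ptr;
 *
 *		return 0;
 *	}
 *
 *	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, find_ready_tlv,
 *				  &ready);
 */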
0213 
0214 static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
0215                      const void *ptr, void *data)
0216 {
0217     const void **tb = data;
0218 
0219     if (tag < WMI_TAG_MAX)
0220         tb[tag] = ptr;
0221 
0222     return 0;
0223 }
0224 
0225 static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
0226                 const void *ptr, size_t len)
0227 {
0228     return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
0229                    (void *)tb);
0230 }
0231 
0232 static const void **
0233 ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
0234                size_t len, gfp_t gfp)
0235 {
0236     const void **tb;
0237     int ret;
0238 
0239     tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
0240     if (!tb)
0241         return ERR_PTR(-ENOMEM);
0242 
0243     ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
0244     if (ret) {
0245         kfree(tb);
0246         return ERR_PTR(ret);
0247     }
0248 
0249     return tb;
0250 }
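/* Sketch of how event handlers typically use the helper above (GFP_ATOMIC is
 * used in the sketch; pick a gfp appropriate to the calling context):
 *
 *	const void **tb;
 *	const struct wmi_vdev_stopped_event *ev;
 *
 *	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *
 *	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
 *	if (!ev) {
 *		kfree(tb);
 *		return -EPROTO;
 *	}
 *	...
 *	kfree(tb);
 *
 * Note that tb[] keeps only the last TLV seen for a given tag, so events that
 * repeat a tag must walk the buffer with ath11k_wmi_tlv_iter() directly.
 */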
0251 
0252 static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
0253                       u32 cmd_id)
0254 {
0255     struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
0256     struct ath11k_base *ab = wmi->wmi_ab->ab;
0257     struct wmi_cmd_hdr *cmd_hdr;
0258     int ret;
0259     u32 cmd = 0;
0260 
0261     if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
0262         return -ENOMEM;
0263 
0264     cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
0265 
0266     cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
0267     cmd_hdr->cmd_id = cmd;
0268 
0269     trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
0270 
0271     memset(skb_cb, 0, sizeof(*skb_cb));
0272     ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
0273 
0274     if (ret)
0275         goto err_pull;
0276 
0277     return 0;
0278 
0279 err_pull:
0280     skb_pull(skb, sizeof(struct wmi_cmd_hdr));
0281     return ret;
0282 }
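/* After the skb_push() above, the buffer handed to HTC is laid out as
 * [wmi_cmd_hdr carrying cmd_id][TLV payload built by the caller]. The
 * headroom reserved in ath11k_wmi_alloc_skb() is what leaves room for this
 * header and for the lower-layer headers pushed in front of it later.
 */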
0283 
0284 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
0285             u32 cmd_id)
0286 {
0287     struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
0288     int ret = -EOPNOTSUPP;
0289     struct ath11k_base *ab = wmi_sc->ab;
0290 
0291     might_sleep();
0292 
0293     if (ab->hw_params.credit_flow) {
0294         wait_event_timeout(wmi_sc->tx_credits_wq, ({
0295             ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
0296 
0297             if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
0298                         &wmi_sc->ab->dev_flags))
0299                 ret = -ESHUTDOWN;
0300 
0301             (ret != -EAGAIN);
0302             }), WMI_SEND_TIMEOUT_HZ);
0303     } else {
0304         wait_event_timeout(wmi->tx_ce_desc_wq, ({
0305             ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
0306 
0307             if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
0308                         &wmi_sc->ab->dev_flags))
0309                 ret = -ESHUTDOWN;
0310 
0311             (ret != -ENOBUFS);
0312             }), WMI_SEND_TIMEOUT_HZ);
0313     }
0314 
0315     if (ret == -EAGAIN)
0316         ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
0317 
0318     if (ret == -ENOBUFS)
0319         ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
0320                 cmd_id);
0321 
0322     return ret;
0323 }
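/* The wait loops above re-run ath11k_wmi_cmd_send_nowait() each time the
 * waitqueue is woken, until the send stops failing with the retryable error
 * (-EAGAIN when tx credits are exhausted on credit-flow targets, -ENOBUFS
 * when no CE descriptors are free) or WMI_SEND_TIMEOUT_HZ expires. A pending
 * crash/flush converts the failure into -ESHUTDOWN so the loop exits
 * immediately and the caller gives up.
 */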
0324 
0325 static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
0326                      const void *ptr,
0327                      struct ath11k_service_ext_param *param)
0328 {
0329     const struct wmi_service_ready_ext_event *ev = ptr;
0330 
0331     if (!ev)
0332         return -EINVAL;
0333 
0334     /* Move this to host based bitmap */
0335     param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
0336     param->default_fw_config_bits = ev->default_fw_config_bits;
0337     param->he_cap_info = ev->he_cap_info;
0338     param->mpdu_density = ev->mpdu_density;
0339     param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
0340     memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
0341 
0342     return 0;
0343 }
0344 
0345 static int
0346 ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
0347                       struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
0348                       struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
0349                       struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
0350                       struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
0351                       u8 hw_mode_id, u8 phy_id,
0352                       struct ath11k_pdev *pdev)
0353 {
0354     struct wmi_mac_phy_capabilities *mac_phy_caps;
0355     struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
0356     struct ath11k_band_cap *cap_band;
0357     struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
0358     u32 phy_map;
0359     u32 hw_idx, phy_idx = 0;
0360 
0361     if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
0362         return -EINVAL;
0363 
0364     for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
0365         if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
0366             break;
0367 
0368         phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
0369         while (phy_map) {
0370             phy_map >>= 1;
0371             phy_idx++;
0372         }
0373     }
0374 
0375     if (hw_idx == hw_caps->num_hw_modes)
0376         return -EINVAL;
0377 
0378     phy_idx += phy_id;
0379     if (phy_id >= hal_reg_caps->num_phy)
0380         return -EINVAL;
0381 
0382     mac_phy_caps = wmi_mac_phy_caps + phy_idx;
0383 
0384     pdev->pdev_id = mac_phy_caps->pdev_id;
0385     pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
0386     pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
0387     ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
0388         mac_phy_caps->supported_bands;
0389     ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
0390     ab->target_pdev_count++;
0391 
0392     if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
0393         !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
0394         return -EINVAL;
0395 
0396     /* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
0397      * from band to band for a single radio, how that should be handled
0398      * still needs to be worked out.
0399      */
0400     if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
0401         pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
0402         pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
0403     }
0404 
0405     if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
0406         pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
0407         pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
0408         pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
0409         pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
0410         pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
0411         pdev_cap->nss_ratio_enabled =
0412             WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
0413         pdev_cap->nss_ratio_info =
0414             WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
0415     }
0416 
0417     /* The tx/rx chainmask reported by firmware depends on the actual hw chains
0418      * used. For example, on a 4x4-capable macphy the first 4 chains can serve the
0419      * first mac and the remaining 4 chains the second mac, or vice versa. In that
0420      * case tx/rx chainmask 0xf is advertised for the first mac and 0xf0 for the
0421      * second, or vice versa. Compute the shift value for the tx/rx chainmask,
0422      * which is used when advertising the supported ht/vht rates to
0423      * mac80211.
0424      */
0425     pdev_cap->tx_chain_mask_shift =
0426             find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
0427     pdev_cap->rx_chain_mask_shift =
0428             find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
0429 
0430     if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
0431         cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
0432         cap_band->phy_id = mac_phy_caps->phy_id;
0433         cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
0434         cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
0435         cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
0436         cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
0437         cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
0438         memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
0439                sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
0440         memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
0441                sizeof(struct ath11k_ppe_threshold));
0442     }
0443 
0444     if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
0445         cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
0446         cap_band->phy_id = mac_phy_caps->phy_id;
0447         cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
0448         cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
0449         cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
0450         cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
0451         cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
0452         memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
0453                sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
0454         memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
0455                sizeof(struct ath11k_ppe_threshold));
0456 
0457         cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
0458         cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
0459         cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
0460         cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
0461         cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
0462         cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
0463         memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
0464                sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
0465         memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
0466                sizeof(struct ath11k_ppe_threshold));
0467     }
0468 
0469     return 0;
0470 }
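/* Worked example for the chain-mask shift computed above: a second mac that
 * reports tx/rx chainmask 0xf0 yields a shift of 4 (find_first_bit()), while
 * a mask of 0xf yields a shift of 0, letting the ht/vht rate advertisement
 * treat either mask as starting from chain 0.
 */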
0471 
0472 static int
0473 ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
0474                 struct wmi_soc_hal_reg_capabilities *reg_caps,
0475                 struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
0476                 u8 phy_idx,
0477                 struct ath11k_hal_reg_capabilities_ext *param)
0478 {
0479     struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
0480 
0481     if (!reg_caps || !wmi_ext_reg_cap)
0482         return -EINVAL;
0483 
0484     if (phy_idx >= reg_caps->num_phy)
0485         return -EINVAL;
0486 
0487     ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
0488 
0489     param->phy_id = ext_reg_cap->phy_id;
0490     param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
0491     param->eeprom_reg_domain_ext =
0492                   ext_reg_cap->eeprom_reg_domain_ext;
0493     param->regcap1 = ext_reg_cap->regcap1;
0494     param->regcap2 = ext_reg_cap->regcap2;
0495     /* check if param->wireless_mode is needed */
0496     param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
0497     param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
0498     param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
0499     param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
0500 
0501     return 0;
0502 }
0503 
0504 static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
0505                      const void *evt_buf,
0506                      struct ath11k_targ_cap *cap)
0507 {
0508     const struct wmi_service_ready_event *ev = evt_buf;
0509 
0510     if (!ev) {
0511         ath11k_err(ab, "%s: failed by NULL param\n",
0512                __func__);
0513         return -EINVAL;
0514     }
0515 
0516     cap->phy_capability = ev->phy_capability;
0517     cap->max_frag_entry = ev->max_frag_entry;
0518     cap->num_rf_chains = ev->num_rf_chains;
0519     cap->ht_cap_info = ev->ht_cap_info;
0520     cap->vht_cap_info = ev->vht_cap_info;
0521     cap->vht_supp_mcs = ev->vht_supp_mcs;
0522     cap->hw_min_tx_power = ev->hw_min_tx_power;
0523     cap->hw_max_tx_power = ev->hw_max_tx_power;
0524     cap->sys_cap_info = ev->sys_cap_info;
0525     cap->min_pkt_size_enable = ev->min_pkt_size_enable;
0526     cap->max_bcn_ie_size = ev->max_bcn_ie_size;
0527     cap->max_num_scan_channels = ev->max_num_scan_channels;
0528     cap->max_supported_macs = ev->max_supported_macs;
0529     cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
0530     cap->txrx_chainmask = ev->txrx_chainmask;
0531     cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
0532     cap->num_msdu_desc = ev->num_msdu_desc;
0533 
0534     return 0;
0535 }
0536 
0537 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
0538  * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
0539  * 4-byte word.
0540  */
0541 static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
0542                        const u32 *wmi_svc_bm)
0543 {
0544     int i, j;
0545 
0546     for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
0547         do {
0548             if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
0549                 set_bit(j, wmi->wmi_ab->svc_map);
0550         } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
0551     }
0552 }
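/* For example, with the layout described above, service id 0 comes from bit 0
 * of wmi_svc_bm[0] and service id WMI_SERVICE_BITS_IN_SIZE32 from bit 0 of
 * wmi_svc_bm[1]; in general the word index is id / WMI_SERVICE_BITS_IN_SIZE32
 * and the bit index is id % WMI_SERVICE_BITS_IN_SIZE32.
 */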
0553 
0554 static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
0555                     const void *ptr, void *data)
0556 {
0557     struct wmi_tlv_svc_ready_parse *svc_ready = data;
0558     struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
0559     u16 expect_len;
0560 
0561     switch (tag) {
0562     case WMI_TAG_SERVICE_READY_EVENT:
0563         if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
0564             return -EINVAL;
0565         break;
0566 
0567     case WMI_TAG_ARRAY_UINT32:
0568         if (!svc_ready->wmi_svc_bitmap_done) {
0569             expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
0570             if (len < expect_len) {
0571                 ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
0572                         len, tag);
0573                 return -EINVAL;
0574             }
0575 
0576             ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
0577 
0578             svc_ready->wmi_svc_bitmap_done = true;
0579         }
0580         break;
0581     default:
0582         break;
0583     }
0584 
0585     return 0;
0586 }
0587 
0588 static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
0589 {
0590     struct wmi_tlv_svc_ready_parse svc_ready = { };
0591     int ret;
0592 
0593     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
0594                   ath11k_wmi_tlv_svc_rdy_parse,
0595                   &svc_ready);
0596     if (ret) {
0597         ath11k_warn(ab, "failed to parse tlv %d\n", ret);
0598         return ret;
0599     }
0600 
0601     return 0;
0602 }
0603 
0604 struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
0605 {
0606     struct sk_buff *skb;
0607     struct ath11k_base *ab = wmi_sc->ab;
0608     u32 round_len = roundup(len, 4);
0609 
0610     skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
0611     if (!skb)
0612         return NULL;
0613 
0614     skb_reserve(skb, WMI_SKB_HEADROOM);
0615     if (!IS_ALIGNED((unsigned long)skb->data, 4))
0616         ath11k_warn(ab, "unaligned WMI skb data\n");
0617 
0618     skb_put(skb, round_len);
0619     memset(skb->data, 0, round_len);
0620 
0621     return skb;
0622 }
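/* ath11k_wmi_alloc_skb() rounds the requested length up to a 4-byte multiple,
 * zeroes the buffer and reserves WMI_SKB_HEADROOM, so the WMI command header
 * (and the transport headers beneath it) can later be pushed in front of the
 * TLV payload without reallocating.
 */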
0623 
0624 static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
0625                     struct ieee80211_tx_info *info)
0626 {
0627     struct ath11k_base *ab = ar->ab;
0628     u32 freq = 0;
0629 
0630     if (ab->hw_params.support_off_channel_tx &&
0631         ar->scan.is_roc &&
0632         (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
0633         freq = ar->scan.roc_freq;
0634 
0635     return freq;
0636 }
0637 
0638 int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
0639              struct sk_buff *frame)
0640 {
0641     struct ath11k_pdev_wmi *wmi = ar->wmi;
0642     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
0643     struct wmi_mgmt_send_cmd *cmd;
0644     struct wmi_tlv *frame_tlv;
0645     struct sk_buff *skb;
0646     u32 buf_len;
0647     int ret, len;
0648 
0649     buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
0650           frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
0651 
0652     len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
0653 
0654     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
0655     if (!skb)
0656         return -ENOMEM;
0657 
0658     cmd = (struct wmi_mgmt_send_cmd *)skb->data;
0659     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
0660               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0661     cmd->vdev_id = vdev_id;
0662     cmd->desc_id = buf_id;
0663     cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
0664     cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
0665     cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
0666     cmd->frame_len = frame->len;
0667     cmd->buf_len = buf_len;
0668     cmd->tx_params_valid = 0;
0669 
0670     frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
0671     frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
0672                 FIELD_PREP(WMI_TLV_LEN, buf_len);
0673 
0674     memcpy(frame_tlv->value, frame->data, buf_len);
0675 
0676     ath11k_ce_byte_swap(frame_tlv->value, buf_len);
0677 
0678     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
0679     if (ret) {
0680         ath11k_warn(ar->ab,
0681                 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
0682         dev_kfree_skb(skb);
0683     }
0684 
0685     return ret;
0686 }
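/* Layout of the management-tx command built above: the fixed
 * wmi_mgmt_send_cmd TLV followed by an ARRAY_BYTE TLV holding at most
 * WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame. The full frame length and its
 * DMA address (paddr_lo/hi) travel in the fixed part, so the target can fetch
 * any remainder of the frame from host memory.
 */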
0687 
0688 int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
0689                struct vdev_create_params *param)
0690 {
0691     struct ath11k_pdev_wmi *wmi = ar->wmi;
0692     struct wmi_vdev_create_cmd *cmd;
0693     struct sk_buff *skb;
0694     struct wmi_vdev_txrx_streams *txrx_streams;
0695     struct wmi_tlv *tlv;
0696     int ret, len;
0697     void *ptr;
0698 
0699     /* This can be optimized by sending the tx/rx chain configuration
0700      * only for the supported bands instead of always sending it for
0701      * both bands.
0702      */
0703     len = sizeof(*cmd) + TLV_HDR_SIZE +
0704         (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
0705 
0706     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
0707     if (!skb)
0708         return -ENOMEM;
0709 
0710     cmd = (struct wmi_vdev_create_cmd *)skb->data;
0711     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
0712               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0713 
0714     cmd->vdev_id = param->if_id;
0715     cmd->vdev_type = param->type;
0716     cmd->vdev_subtype = param->subtype;
0717     cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
0718     cmd->pdev_id = param->pdev_id;
0719     ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
0720 
0721     ptr = skb->data + sizeof(*cmd);
0722     len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
0723 
0724     tlv = ptr;
0725     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
0726               FIELD_PREP(WMI_TLV_LEN, len);
0727 
0728     ptr += TLV_HDR_SIZE;
0729     txrx_streams = ptr;
0730     len = sizeof(*txrx_streams);
0731     txrx_streams->tlv_header =
0732         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
0733         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
0734     txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
0735     txrx_streams->supported_tx_streams =
0736                  param->chains[NL80211_BAND_2GHZ].tx;
0737     txrx_streams->supported_rx_streams =
0738                  param->chains[NL80211_BAND_2GHZ].rx;
0739 
0740     txrx_streams++;
0741     txrx_streams->tlv_header =
0742         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
0743         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
0744     txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
0745     txrx_streams->supported_tx_streams =
0746                  param->chains[NL80211_BAND_5GHZ].tx;
0747     txrx_streams->supported_rx_streams =
0748                  param->chains[NL80211_BAND_5GHZ].rx;
0749 
0750     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
0751     if (ret) {
0752         ath11k_warn(ar->ab,
0753                 "failed to submit WMI_VDEV_CREATE_CMDID\n");
0754         dev_kfree_skb(skb);
0755     }
0756 
0757     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
0758            "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
0759            param->if_id, param->type, param->subtype,
0760            macaddr, param->pdev_id);
0761 
0762     return ret;
0763 }
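/* The vdev create command above carries, after the fixed wmi_vdev_create_cmd
 * TLV, an ARRAY_STRUCT TLV with WMI_NUM_SUPPORTED_BAND_MAX
 * wmi_vdev_txrx_streams entries: one for the 2 GHz band and one for the
 * 5 GHz band, each holding the configured tx/rx stream counts.
 */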
0764 
0765 int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
0766 {
0767     struct ath11k_pdev_wmi *wmi = ar->wmi;
0768     struct wmi_vdev_delete_cmd *cmd;
0769     struct sk_buff *skb;
0770     int ret;
0771 
0772     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
0773     if (!skb)
0774         return -ENOMEM;
0775 
0776     cmd = (struct wmi_vdev_delete_cmd *)skb->data;
0777     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
0778               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0779     cmd->vdev_id = vdev_id;
0780 
0781     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
0782     if (ret) {
0783         ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
0784         dev_kfree_skb(skb);
0785     }
0786 
0787     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
0788 
0789     return ret;
0790 }
0791 
0792 int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
0793 {
0794     struct ath11k_pdev_wmi *wmi = ar->wmi;
0795     struct wmi_vdev_stop_cmd *cmd;
0796     struct sk_buff *skb;
0797     int ret;
0798 
0799     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
0800     if (!skb)
0801         return -ENOMEM;
0802 
0803     cmd = (struct wmi_vdev_stop_cmd *)skb->data;
0804 
0805     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
0806               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0807     cmd->vdev_id = vdev_id;
0808 
0809     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
0810     if (ret) {
0811         ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
0812         dev_kfree_skb(skb);
0813     }
0814 
0815     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
0816 
0817     return ret;
0818 }
0819 
0820 int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
0821 {
0822     struct ath11k_pdev_wmi *wmi = ar->wmi;
0823     struct wmi_vdev_down_cmd *cmd;
0824     struct sk_buff *skb;
0825     int ret;
0826 
0827     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
0828     if (!skb)
0829         return -ENOMEM;
0830 
0831     cmd = (struct wmi_vdev_down_cmd *)skb->data;
0832 
0833     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
0834               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0835     cmd->vdev_id = vdev_id;
0836 
0837     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
0838     if (ret) {
0839         ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
0840         dev_kfree_skb(skb);
0841     }
0842 
0843     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
0844 
0845     return ret;
0846 }
0847 
0848 static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
0849                        struct wmi_vdev_start_req_arg *arg)
0850 {
0851     u32 center_freq1 = arg->channel.band_center_freq1;
0852 
0853     memset(chan, 0, sizeof(*chan));
0854 
0855     chan->mhz = arg->channel.freq;
0856     chan->band_center_freq1 = arg->channel.band_center_freq1;
0857 
0858     if (arg->channel.mode == MODE_11AX_HE160) {
0859         if (arg->channel.freq > arg->channel.band_center_freq1)
0860             chan->band_center_freq1 = center_freq1 + 40;
0861         else
0862             chan->band_center_freq1 = center_freq1 - 40;
0863 
0864         chan->band_center_freq2 = arg->channel.band_center_freq1;
0865 
0866     } else if (arg->channel.mode == MODE_11AC_VHT80_80) {
0867         chan->band_center_freq2 = arg->channel.band_center_freq2;
0868     } else {
0869         chan->band_center_freq2 = 0;
0870     }
0871 
0872     chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
0873     if (arg->channel.passive)
0874         chan->info |= WMI_CHAN_INFO_PASSIVE;
0875     if (arg->channel.allow_ibss)
0876         chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
0877     if (arg->channel.allow_ht)
0878         chan->info |= WMI_CHAN_INFO_ALLOW_HT;
0879     if (arg->channel.allow_vht)
0880         chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
0881     if (arg->channel.allow_he)
0882         chan->info |= WMI_CHAN_INFO_ALLOW_HE;
0883     if (arg->channel.ht40plus)
0884         chan->info |= WMI_CHAN_INFO_HT40_PLUS;
0885     if (arg->channel.chan_radar)
0886         chan->info |= WMI_CHAN_INFO_DFS;
0887     if (arg->channel.freq2_radar)
0888         chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
0889 
0890     chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
0891                       arg->channel.max_power) |
0892         FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
0893                arg->channel.max_reg_power);
0894 
0895     chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
0896                       arg->channel.max_antenna_gain) |
0897         FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
0898                arg->channel.max_power);
0899 }
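/* For MODE_11AX_HE160 the helper above converts the single 160 MHz center
 * frequency into the pair passed to firmware: band_center_freq1 becomes the
 * center of the 80 MHz segment containing the primary channel and
 * band_center_freq2 the center of the full 160 MHz channel. For example, for
 * the 160 MHz channel spanning 5170-5330 MHz (center 5250) with the primary
 * channel at 5180 MHz, freq1 is set to 5210 and freq2 to 5250.
 */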
0900 
0901 int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
0902               bool restart)
0903 {
0904     struct ath11k_pdev_wmi *wmi = ar->wmi;
0905     struct wmi_vdev_start_request_cmd *cmd;
0906     struct sk_buff *skb;
0907     struct wmi_channel *chan;
0908     struct wmi_tlv *tlv;
0909     void *ptr;
0910     int ret, len;
0911 
0912     if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
0913         return -EINVAL;
0914 
0915     len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
0916 
0917     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
0918     if (!skb)
0919         return -ENOMEM;
0920 
0921     cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
0922     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
0923                      WMI_TAG_VDEV_START_REQUEST_CMD) |
0924               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
0925     cmd->vdev_id = arg->vdev_id;
0926     cmd->beacon_interval = arg->bcn_intval;
0927     cmd->bcn_tx_rate = arg->bcn_tx_rate;
0928     cmd->dtim_period = arg->dtim_period;
0929     cmd->num_noa_descriptors = arg->num_noa_descriptors;
0930     cmd->preferred_rx_streams = arg->pref_rx_streams;
0931     cmd->preferred_tx_streams = arg->pref_tx_streams;
0932     cmd->cac_duration_ms = arg->cac_duration_ms;
0933     cmd->regdomain = arg->regdomain;
0934     cmd->he_ops = arg->he_ops;
0935 
0936     if (!restart) {
0937         if (arg->ssid) {
0938             cmd->ssid.ssid_len = arg->ssid_len;
0939             memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
0940         }
0941         if (arg->hidden_ssid)
0942             cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
0943         if (arg->pmf_enabled)
0944             cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
0945     }
0946 
0947     cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
0948     if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
0949         cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
0950 
0951     ptr = skb->data + sizeof(*cmd);
0952     chan = ptr;
0953 
0954     ath11k_wmi_put_wmi_channel(chan, arg);
0955 
0956     chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
0957                FIELD_PREP(WMI_TLV_LEN,
0958                       sizeof(*chan) - TLV_HDR_SIZE);
0959     ptr += sizeof(*chan);
0960 
0961     tlv = ptr;
0962     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
0963               FIELD_PREP(WMI_TLV_LEN, 0);
0964 
0965     /* Note: This is a nested TLV containing:
0966      * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
0967      */
0968 
0969     ptr += sizeof(*tlv);
0970 
0971     if (restart)
0972         ret = ath11k_wmi_cmd_send(wmi, skb,
0973                       WMI_VDEV_RESTART_REQUEST_CMDID);
0974     else
0975         ret = ath11k_wmi_cmd_send(wmi, skb,
0976                       WMI_VDEV_START_REQUEST_CMDID);
0977     if (ret) {
0978         ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
0979                 restart ? "restart" : "start");
0980         dev_kfree_skb(skb);
0981     }
0982 
0983     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
0984            restart ? "restart" : "start", arg->vdev_id,
0985            arg->channel.freq, arg->channel.mode);
0986 
0987     return ret;
0988 }
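/* TLV layout of the start/restart command above: the fixed
 * wmi_vdev_start_request_cmd, a wmi_channel TLV describing the target
 * channel, and a zero-length ARRAY_STRUCT placeholder for P2P NoA
 * descriptors (none are supplied here).
 */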
0989 
0990 int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
0991 {
0992     struct ath11k_pdev_wmi *wmi = ar->wmi;
0993     struct wmi_vdev_up_cmd *cmd;
0994     struct sk_buff *skb;
0995     int ret;
0996 
0997     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
0998     if (!skb)
0999         return -ENOMEM;
1000 
1001     cmd = (struct wmi_vdev_up_cmd *)skb->data;
1002 
1003     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
1004               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1005     cmd->vdev_id = vdev_id;
1006     cmd->vdev_assoc_id = aid;
1007 
1008     ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1009 
1010     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1011     if (ret) {
1012         ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1013         dev_kfree_skb(skb);
1014     }
1015 
1016     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1017            "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1018            vdev_id, aid, bssid);
1019 
1020     return ret;
1021 }
1022 
1023 int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
1024                     struct peer_create_params *param)
1025 {
1026     struct ath11k_pdev_wmi *wmi = ar->wmi;
1027     struct wmi_peer_create_cmd *cmd;
1028     struct sk_buff *skb;
1029     int ret;
1030 
1031     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1032     if (!skb)
1033         return -ENOMEM;
1034 
1035     cmd = (struct wmi_peer_create_cmd *)skb->data;
1036     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
1037               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1038 
1039     ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
1040     cmd->peer_type = param->peer_type;
1041     cmd->vdev_id = param->vdev_id;
1042 
1043     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1044     if (ret) {
1045         ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1046         dev_kfree_skb(skb);
1047     }
1048 
1049     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1050            "WMI peer create vdev_id %d peer_addr %pM\n",
1051            param->vdev_id, param->peer_addr);
1052 
1053     return ret;
1054 }
1055 
1056 int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
1057                     const u8 *peer_addr, u8 vdev_id)
1058 {
1059     struct ath11k_pdev_wmi *wmi = ar->wmi;
1060     struct wmi_peer_delete_cmd *cmd;
1061     struct sk_buff *skb;
1062     int ret;
1063 
1064     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1065     if (!skb)
1066         return -ENOMEM;
1067 
1068     cmd = (struct wmi_peer_delete_cmd *)skb->data;
1069     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
1070               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1071 
1072     ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1073     cmd->vdev_id = vdev_id;
1074 
1075     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1076            "WMI peer delete vdev_id %d peer_addr %pM\n",
1077            vdev_id,  peer_addr);
1078 
1079     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
1080     if (ret) {
1081         ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
1082         dev_kfree_skb(skb);
1083     }
1084 
1085     return ret;
1086 }
1087 
1088 int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
1089                        struct pdev_set_regdomain_params *param)
1090 {
1091     struct ath11k_pdev_wmi *wmi = ar->wmi;
1092     struct wmi_pdev_set_regdomain_cmd *cmd;
1093     struct sk_buff *skb;
1094     int ret;
1095 
1096     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1097     if (!skb)
1098         return -ENOMEM;
1099 
1100     cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1101     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1102                      WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
1103               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1104 
1105     cmd->reg_domain = param->current_rd_in_use;
1106     cmd->reg_domain_2g = param->current_rd_2g;
1107     cmd->reg_domain_5g = param->current_rd_5g;
1108     cmd->conformance_test_limit_2g = param->ctl_2g;
1109     cmd->conformance_test_limit_5g = param->ctl_5g;
1110     cmd->dfs_domain = param->dfs_domain;
1111     cmd->pdev_id = param->pdev_id;
1112 
1113     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1114            "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1115            param->current_rd_in_use, param->current_rd_2g,
1116            param->current_rd_5g, param->dfs_domain, param->pdev_id);
1117 
1118     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1119     if (ret) {
1120         ath11k_warn(ar->ab,
1121                 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1122         dev_kfree_skb(skb);
1123     }
1124 
1125     return ret;
1126 }
1127 
1128 int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
1129                   u32 vdev_id, u32 param_id, u32 param_val)
1130 {
1131     struct ath11k_pdev_wmi *wmi = ar->wmi;
1132     struct wmi_peer_set_param_cmd *cmd;
1133     struct sk_buff *skb;
1134     int ret;
1135 
1136     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1137     if (!skb)
1138         return -ENOMEM;
1139 
1140     cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1141     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
1142               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1143     ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1144     cmd->vdev_id = vdev_id;
1145     cmd->param_id = param_id;
1146     cmd->param_value = param_val;
1147 
1148     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
1149     if (ret) {
1150         ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
1151         dev_kfree_skb(skb);
1152     }
1153 
1154     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1155            "WMI vdev %d peer 0x%pM set param %d value %d\n",
1156            vdev_id, peer_addr, param_id, param_val);
1157 
1158     return ret;
1159 }
1160 
1161 int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
1162                     u8 peer_addr[ETH_ALEN],
1163                     struct peer_flush_params *param)
1164 {
1165     struct ath11k_pdev_wmi *wmi = ar->wmi;
1166     struct wmi_peer_flush_tids_cmd *cmd;
1167     struct sk_buff *skb;
1168     int ret;
1169 
1170     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1171     if (!skb)
1172         return -ENOMEM;
1173 
1174     cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1175     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
1176               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1177 
1178     ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1179     cmd->peer_tid_bitmap = param->peer_tid_bitmap;
1180     cmd->vdev_id = param->vdev_id;
1181 
1182     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1183     if (ret) {
1184         ath11k_warn(ar->ab,
1185                 "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1186         dev_kfree_skb(skb);
1187     }
1188 
1189     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1190            "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1191            param->vdev_id, peer_addr, param->peer_tid_bitmap);
1192 
1193     return ret;
1194 }
1195 
1196 int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
1197                        int vdev_id, const u8 *addr,
1198                        dma_addr_t paddr, u8 tid,
1199                        u8 ba_window_size_valid,
1200                        u32 ba_window_size)
1201 {
1202     struct wmi_peer_reorder_queue_setup_cmd *cmd;
1203     struct sk_buff *skb;
1204     int ret;
1205 
1206     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
1207     if (!skb)
1208         return -ENOMEM;
1209 
1210     cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
1211     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1212                      WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
1213               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1214 
1215     ether_addr_copy(cmd->peer_macaddr.addr, addr);
1216     cmd->vdev_id = vdev_id;
1217     cmd->tid = tid;
1218     cmd->queue_ptr_lo = lower_32_bits(paddr);
1219     cmd->queue_ptr_hi = upper_32_bits(paddr);
1220     cmd->queue_no = tid;
1221     cmd->ba_window_size_valid = ba_window_size_valid;
1222     cmd->ba_window_size = ba_window_size;
1223 
1224     ret = ath11k_wmi_cmd_send(ar->wmi, skb,
1225                   WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
1226     if (ret) {
1227         ath11k_warn(ar->ab,
1228                 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1229         dev_kfree_skb(skb);
1230     }
1231 
1232     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1233            "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1234            addr, vdev_id, tid);
1235 
1236     return ret;
1237 }
1238 
1239 int
1240 ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
1241                  struct rx_reorder_queue_remove_params *param)
1242 {
1243     struct ath11k_pdev_wmi *wmi = ar->wmi;
1244     struct wmi_peer_reorder_queue_remove_cmd *cmd;
1245     struct sk_buff *skb;
1246     int ret;
1247 
1248     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1249     if (!skb)
1250         return -ENOMEM;
1251 
1252     cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1253     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1254                      WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
1255               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1256 
1257     ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
1258     cmd->vdev_id = param->vdev_id;
1259     cmd->tid_mask = param->peer_tid_bitmap;
1260 
1261     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1262            "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
1263            param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
1264 
1265     ret = ath11k_wmi_cmd_send(wmi, skb,
1266                   WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1267     if (ret) {
1268         ath11k_warn(ar->ab,
1269                 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1270         dev_kfree_skb(skb);
1271     }
1272 
1273     return ret;
1274 }
1275 
1276 int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
1277                   u32 param_value, u8 pdev_id)
1278 {
1279     struct ath11k_pdev_wmi *wmi = ar->wmi;
1280     struct wmi_pdev_set_param_cmd *cmd;
1281     struct sk_buff *skb;
1282     int ret;
1283 
1284     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1285     if (!skb)
1286         return -ENOMEM;
1287 
1288     cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1289     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
1290               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1291     cmd->pdev_id = pdev_id;
1292     cmd->param_id = param_id;
1293     cmd->param_value = param_value;
1294 
1295     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1296     if (ret) {
1297         ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1298         dev_kfree_skb(skb);
1299     }
1300 
1301     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1302            "WMI pdev set param %d pdev id %d value %d\n",
1303            param_id, pdev_id, param_value);
1304 
1305     return ret;
1306 }
1307 
1308 int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
1309                 enum wmi_sta_ps_mode psmode)
1310 {
1311     struct ath11k_pdev_wmi *wmi = ar->wmi;
1312     struct wmi_pdev_set_ps_mode_cmd *cmd;
1313     struct sk_buff *skb;
1314     int ret;
1315 
1316     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1317     if (!skb)
1318         return -ENOMEM;
1319 
1320     cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1321     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
1322               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1323     cmd->vdev_id = vdev_id;
1324     cmd->sta_ps_mode = psmode;
1325 
1326     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1327     if (ret) {
1328         ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1329         dev_kfree_skb(skb);
1330     }
1331 
1332     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1333            "WMI vdev set psmode %d vdev id %d\n",
1334            psmode, vdev_id);
1335 
1336     return ret;
1337 }
1338 
1339 int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
1340                 u32 pdev_id)
1341 {
1342     struct ath11k_pdev_wmi *wmi = ar->wmi;
1343     struct wmi_pdev_suspend_cmd *cmd;
1344     struct sk_buff *skb;
1345     int ret;
1346 
1347     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1348     if (!skb)
1349         return -ENOMEM;
1350 
1351     cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1352 
1353     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
1354               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1355 
1356     cmd->suspend_opt = suspend_opt;
1357     cmd->pdev_id = pdev_id;
1358 
1359     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1360     if (ret) {
1361         ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1362         dev_kfree_skb(skb);
1363     }
1364 
1365     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1366            "WMI pdev suspend pdev_id %d\n", pdev_id);
1367 
1368     return ret;
1369 }
1370 
1371 int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
1372 {
1373     struct ath11k_pdev_wmi *wmi = ar->wmi;
1374     struct wmi_pdev_resume_cmd *cmd;
1375     struct sk_buff *skb;
1376     int ret;
1377 
1378     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1379     if (!skb)
1380         return -ENOMEM;
1381 
1382     cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1383 
1384     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
1385               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1386     cmd->pdev_id = pdev_id;
1387 
1388     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1389            "WMI pdev resume pdev id %d\n", pdev_id);
1390 
1391     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1392     if (ret) {
1393         ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1394         dev_kfree_skb(skb);
1395     }
1396 
1397     return ret;
1398 }
1399 
1400 /* TODO FW Support for the cmd is not available yet.
1401  * Can be tested once the command and corresponding
1402  * event is implemented in FW
1403  */
1404 int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
1405                       enum wmi_bss_chan_info_req_type type)
1406 {
1407     struct ath11k_pdev_wmi *wmi = ar->wmi;
1408     struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1409     struct sk_buff *skb;
1410     int ret;
1411 
1412     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1413     if (!skb)
1414         return -ENOMEM;
1415 
1416     cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1417 
1418     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1419                      WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
1420               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1421     cmd->req_type = type;
1422     cmd->pdev_id = ar->pdev->pdev_id;
1423 
1424     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1425            "WMI bss chan info req type %d\n", type);
1426 
1427     ret = ath11k_wmi_cmd_send(wmi, skb,
1428                   WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1429     if (ret) {
1430         ath11k_warn(ar->ab,
1431                 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1432         dev_kfree_skb(skb);
1433     }
1434 
1435     return ret;
1436 }
1437 
1438 int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
1439                     struct ap_ps_params *param)
1440 {
1441     struct ath11k_pdev_wmi *wmi = ar->wmi;
1442     struct wmi_ap_ps_peer_cmd *cmd;
1443     struct sk_buff *skb;
1444     int ret;
1445 
1446     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1447     if (!skb)
1448         return -ENOMEM;
1449 
1450     cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1451     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
1452               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1453 
1454     cmd->vdev_id = param->vdev_id;
1455     ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1456     cmd->param = param->param;
1457     cmd->value = param->value;
1458 
1459     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1460     if (ret) {
1461         ath11k_warn(ar->ab,
1462                 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1463         dev_kfree_skb(skb);
1464     }
1465 
1466     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1467            "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1468            param->vdev_id, peer_addr, param->param, param->value);
1469 
1470     return ret;
1471 }
1472 
1473 int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
1474                 u32 param, u32 param_value)
1475 {
1476     struct ath11k_pdev_wmi *wmi = ar->wmi;
1477     struct wmi_sta_powersave_param_cmd *cmd;
1478     struct sk_buff *skb;
1479     int ret;
1480 
1481     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1482     if (!skb)
1483         return -ENOMEM;
1484 
1485     cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1486     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1487                      WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
1488               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1489 
1490     cmd->vdev_id = vdev_id;
1491     cmd->param = param;
1492     cmd->value = param_value;
1493 
1494     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1495            "WMI set sta ps vdev_id %d param %d value %d\n",
1496            vdev_id, param, param_value);
1497 
1498     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1499     if (ret) {
1500         ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1501         dev_kfree_skb(skb);
1502     }
1503 
1504     return ret;
1505 }
1506 
1507 int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
1508 {
1509     struct ath11k_pdev_wmi *wmi = ar->wmi;
1510     struct wmi_force_fw_hang_cmd *cmd;
1511     struct sk_buff *skb;
1512     int ret, len;
1513 
1514     len = sizeof(*cmd);
1515 
1516     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
1517     if (!skb)
1518         return -ENOMEM;
1519 
1520     cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1521     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
1522               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
1523 
1524     cmd->type = type;
1525     cmd->delay_time_ms = delay_time_ms;
1526 
1527     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1528 
1529     if (ret) {
1530         ath11k_warn(ar->ab, "failed to send WMI_FORCE_FW_HANG_CMDID\n");
1531         dev_kfree_skb(skb);
1532     }
1533     return ret;
1534 }
1535 
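/* A minimal usage sketch, not part of the driver: a debug/testing caller could
 * use ath11k_wmi_force_fw_hang_cmd() above to make the firmware assert so the
 * recovery path can be exercised. The type value 1 and the zero delay below
 * are assumed placeholders; the real defines live in wmi.h.
 */
static int __maybe_unused
ath11k_wmi_example_trigger_fw_assert(struct ath11k *ar)
{
	/* assumed: type 1 requests an assert, 0 ms means "hang immediately" */
	return ath11k_wmi_force_fw_hang_cmd(ar, 1, 0);
}
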
1536 int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
1537                   u32 param_id, u32 param_value)
1538 {
1539     struct ath11k_pdev_wmi *wmi = ar->wmi;
1540     struct wmi_vdev_set_param_cmd *cmd;
1541     struct sk_buff *skb;
1542     int ret;
1543 
1544     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1545     if (!skb)
1546         return -ENOMEM;
1547 
1548     cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1549     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
1550               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1551 
1552     cmd->vdev_id = vdev_id;
1553     cmd->param_id = param_id;
1554     cmd->param_value = param_value;
1555 
1556     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1557     if (ret) {
1558         ath11k_warn(ar->ab,
1559                 "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1560         dev_kfree_skb(skb);
1561     }
1562 
1563     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1564            "WMI vdev id 0x%x set param %d value %d\n",
1565            vdev_id, param_id, param_value);
1566 
1567     return ret;
1568 }
1569 
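/* A minimal usage sketch, not part of the driver: shows the typical call
 * pattern for ath11k_wmi_vdev_set_param_cmd() above. Real callers pass one of
 * the WMI vdev parameter ids from wmi.h; the zero id and the value below are
 * placeholders only.
 */
static int __maybe_unused
ath11k_wmi_example_set_vdev_param(struct ath11k *ar, u32 vdev_id)
{
	u32 param_id = 0;	/* placeholder WMI vdev parameter id */
	u32 param_value = 100;	/* placeholder value */

	return ath11k_wmi_vdev_set_param_cmd(ar, vdev_id, param_id, param_value);
}
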
1570 int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
1571                       struct stats_request_params *param)
1572 {
1573     struct ath11k_pdev_wmi *wmi = ar->wmi;
1574     struct wmi_request_stats_cmd *cmd;
1575     struct sk_buff *skb;
1576     int ret;
1577 
1578     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1579     if (!skb)
1580         return -ENOMEM;
1581 
1582     cmd = (struct wmi_request_stats_cmd *)skb->data;
1583     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
1584               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1585 
1586     cmd->stats_id = param->stats_id;
1587     cmd->vdev_id = param->vdev_id;
1588     cmd->pdev_id = param->pdev_id;
1589 
1590     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
1591     if (ret) {
1592         ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
1593         dev_kfree_skb(skb);
1594     }
1595 
1596     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1597            "WMI request stats 0x%x vdev id %d pdev id %d\n",
1598            param->stats_id, param->vdev_id, param->pdev_id);
1599 
1600     return ret;
1601 }
1602 
1603 int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
1604 {
1605     struct ath11k_pdev_wmi *wmi = ar->wmi;
1606     struct wmi_get_pdev_temperature_cmd *cmd;
1607     struct sk_buff *skb;
1608     int ret;
1609 
1610     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1611     if (!skb)
1612         return -ENOMEM;
1613 
1614     cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1615     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
1616               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1617     cmd->pdev_id = ar->pdev->pdev_id;
1618 
1619     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1620     if (ret) {
1621         ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1622         dev_kfree_skb(skb);
1623     }
1624 
1625     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1626            "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1627 
1628     return ret;
1629 }
1630 
1631 int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
1632                         u32 vdev_id, u32 bcn_ctrl_op)
1633 {
1634     struct ath11k_pdev_wmi *wmi = ar->wmi;
1635     struct wmi_bcn_offload_ctrl_cmd *cmd;
1636     struct sk_buff *skb;
1637     int ret;
1638 
1639     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1640     if (!skb)
1641         return -ENOMEM;
1642 
1643     cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1644     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1645                      WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
1646               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1647 
1648     cmd->vdev_id = vdev_id;
1649     cmd->bcn_ctrl_op = bcn_ctrl_op;
1650 
1651     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1652            "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1653            vdev_id, bcn_ctrl_op);
1654 
1655     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1656     if (ret) {
1657         ath11k_warn(ar->ab,
1658                 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1659         dev_kfree_skb(skb);
1660     }
1661 
1662     return ret;
1663 }
1664 
1665 int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
1666             struct ieee80211_mutable_offsets *offs,
1667             struct sk_buff *bcn)
1668 {
1669     struct ath11k_pdev_wmi *wmi = ar->wmi;
1670     struct wmi_bcn_tmpl_cmd *cmd;
1671     struct wmi_bcn_prb_info *bcn_prb_info;
1672     struct wmi_tlv *tlv;
1673     struct sk_buff *skb;
1674     void *ptr;
1675     int ret, len;
1676     size_t aligned_len = roundup(bcn->len, 4);
1677     struct ieee80211_vif *vif;
1678     struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
1679 
1680     if (!arvif) {
1681         ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
1682         return -EINVAL;
1683     }
1684 
1685     vif = arvif->vif;
1686 
1687     len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1688 
1689     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
1690     if (!skb)
1691         return -ENOMEM;
1692 
1693     cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1694     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
1695               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1696     cmd->vdev_id = vdev_id;
1697     cmd->tim_ie_offset = offs->tim_offset;
1698 
1699     if (vif->bss_conf.csa_active) {
1700         cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
1701         cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
1702     }
1703 
1704     cmd->buf_len = bcn->len;
1705 
1706     ptr = skb->data + sizeof(*cmd);
1707 
1708     bcn_prb_info = ptr;
1709     len = sizeof(*bcn_prb_info);
1710     bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1711                           WMI_TAG_BCN_PRB_INFO) |
1712                    FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
1713     bcn_prb_info->caps = 0;
1714     bcn_prb_info->erp = 0;
1715 
1716     ptr += sizeof(*bcn_prb_info);
1717 
1718     tlv = ptr;
1719     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
1720               FIELD_PREP(WMI_TLV_LEN, aligned_len);
1721     memcpy(tlv->value, bcn->data, bcn->len);
1722 
1723     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1724     if (ret) {
1725         ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1726         dev_kfree_skb(skb);
1727     }
1728 
1729     return ret;
1730 }
1731 
1732 int ath11k_wmi_vdev_install_key(struct ath11k *ar,
1733                 struct wmi_vdev_install_key_arg *arg)
1734 {
1735     struct ath11k_pdev_wmi *wmi = ar->wmi;
1736     struct wmi_vdev_install_key_cmd *cmd;
1737     struct wmi_tlv *tlv;
1738     struct sk_buff *skb;
1739     int ret, len;
1740     int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
1741 
1742     len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1743 
1744     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
1745     if (!skb)
1746         return -ENOMEM;
1747 
1748     cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1749     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
1750               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1751     cmd->vdev_id = arg->vdev_id;
1752     ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1753     cmd->key_idx = arg->key_idx;
1754     cmd->key_flags = arg->key_flags;
1755     cmd->key_cipher = arg->key_cipher;
1756     cmd->key_len = arg->key_len;
1757     cmd->key_txmic_len = arg->key_txmic_len;
1758     cmd->key_rxmic_len = arg->key_rxmic_len;
1759 
1760     if (arg->key_rsc_counter)
1761         memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
1762                sizeof(struct wmi_key_seq_counter));
1763 
1764     tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1765     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
1766               FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
1767     if (arg->key_data)
1768         memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
1769 
1770     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1771     if (ret) {
1772         ath11k_warn(ar->ab,
1773                 "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1774         dev_kfree_skb(skb);
1775     }
1776 
1777     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
1778            "WMI vdev install key idx %d cipher %d len %d\n",
1779            arg->key_idx, arg->key_cipher, arg->key_len);
1780 
1781     return ret;
1782 }
1783 
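/* A minimal usage sketch, not part of the driver: shows how a caller might
 * fill struct wmi_vdev_install_key_arg for ath11k_wmi_vdev_install_key()
 * above. Field names follow the usage in that function; the cipher id, flags
 * and key index are placeholders, and key_data/key_len describe the raw key
 * material that the function copies into the WMI_TAG_ARRAY_BYTE TLV.
 */
static int __maybe_unused
ath11k_wmi_example_install_key(struct ath11k *ar, u32 vdev_id,
			       u8 *peer_mac, u8 *key, u32 key_len)
{
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = vdev_id,
		.macaddr = peer_mac,
		.key_idx = 0,		/* placeholder key index */
		.key_flags = 0,		/* placeholder (e.g. pairwise/group) */
		.key_cipher = 0,	/* placeholder WMI cipher id */
		.key_len = key_len,
		.key_data = key,
	};

	return ath11k_wmi_vdev_install_key(ar, &arg);
}
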
1784 static inline void
1785 ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
1786                struct peer_assoc_params *param,
1787                bool hw_crypto_disabled)
1788 {
1789     cmd->peer_flags = 0;
1790 
1791     if (param->is_wme_set) {
1792         if (param->qos_flag)
1793             cmd->peer_flags |= WMI_PEER_QOS;
1794         if (param->apsd_flag)
1795             cmd->peer_flags |= WMI_PEER_APSD;
1796         if (param->ht_flag)
1797             cmd->peer_flags |= WMI_PEER_HT;
1798         if (param->bw_40)
1799             cmd->peer_flags |= WMI_PEER_40MHZ;
1800         if (param->bw_80)
1801             cmd->peer_flags |= WMI_PEER_80MHZ;
1802         if (param->bw_160)
1803             cmd->peer_flags |= WMI_PEER_160MHZ;
1804 
1805         /* Typically if STBC is enabled for VHT it should be enabled
1806          * for HT as well
1807          **/
1808         if (param->stbc_flag)
1809             cmd->peer_flags |= WMI_PEER_STBC;
1810 
1811         /* Typically if LDPC is enabled for VHT it should be enabled
1812          * for HT as well
1813          **/
1814         if (param->ldpc_flag)
1815             cmd->peer_flags |= WMI_PEER_LDPC;
1816 
1817         if (param->static_mimops_flag)
1818             cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
1819         if (param->dynamic_mimops_flag)
1820             cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
1821         if (param->spatial_mux_flag)
1822             cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
1823         if (param->vht_flag)
1824             cmd->peer_flags |= WMI_PEER_VHT;
1825         if (param->he_flag)
1826             cmd->peer_flags |= WMI_PEER_HE;
1827         if (param->twt_requester)
1828             cmd->peer_flags |= WMI_PEER_TWT_REQ;
1829         if (param->twt_responder)
1830             cmd->peer_flags |= WMI_PEER_TWT_RESP;
1831     }
1832 
1833     /* Suppress authorization for all AUTH modes that need 4-way handshake
1834      * (during re-association).
1835      * Authorization will be done for these modes on key installation.
1836      */
1837     if (param->auth_flag)
1838         cmd->peer_flags |= WMI_PEER_AUTH;
1839     if (param->need_ptk_4_way) {
1840         cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
1841         if (!hw_crypto_disabled && param->is_assoc)
1842             cmd->peer_flags &= ~WMI_PEER_AUTH;
1843     }
1844     if (param->need_gtk_2_way)
1845         cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
1846     /* safe mode bypass the 4-way handshake */
1847     if (param->safe_mode_enabled)
1848         cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
1849                      WMI_PEER_NEED_GTK_2_WAY);
1850 
1851     if (param->is_pmf_enabled)
1852         cmd->peer_flags |= WMI_PEER_PMF;
1853 
1854     /* Disable AMSDU for station transmit, if user configures it */
1855     /* Disable AMSDU for AP transmit to 11n Stations, if user configures
1856      * it
1857      * if (param->amsdu_disable) Add after FW support
1858      **/
1859 
1860     /* Target asserts if node is marked HT and all MCS is set to 0.
1861      * Mark the node as non-HT if all the mcs rates are disabled through
1862      * iwpriv
1863      **/
1864     if (param->peer_ht_rates.num_rates == 0)
1865         cmd->peer_flags &= ~WMI_PEER_HT;
1866 }
1867 
1868 int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
1869                    struct peer_assoc_params *param)
1870 {
1871     struct ath11k_pdev_wmi *wmi = ar->wmi;
1872     struct wmi_peer_assoc_complete_cmd *cmd;
1873     struct wmi_vht_rate_set *mcs;
1874     struct wmi_he_rate_set *he_mcs;
1875     struct sk_buff *skb;
1876     struct wmi_tlv *tlv;
1877     void *ptr;
1878     u32 peer_legacy_rates_align;
1879     u32 peer_ht_rates_align;
1880     int i, ret, len;
1881 
1882     peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
1883                       sizeof(u32));
1884     peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
1885                       sizeof(u32));
1886 
1887     len = sizeof(*cmd) +
1888           TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
1889           TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
1890           sizeof(*mcs) + TLV_HDR_SIZE +
1891           (sizeof(*he_mcs) * param->peer_he_mcs_count);
1892 
1893     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
1894     if (!skb)
1895         return -ENOMEM;
1896 
1897     ptr = skb->data;
1898 
1899     cmd = ptr;
1900     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1901                      WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
1902               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
1903 
1904     cmd->vdev_id = param->vdev_id;
1905 
1906     cmd->peer_new_assoc = param->peer_new_assoc;
1907     cmd->peer_associd = param->peer_associd;
1908 
1909     ath11k_wmi_copy_peer_flags(cmd, param,
1910                    test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
1911                         &ar->ab->dev_flags));
1912 
1913     ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
1914 
1915     cmd->peer_rate_caps = param->peer_rate_caps;
1916     cmd->peer_caps = param->peer_caps;
1917     cmd->peer_listen_intval = param->peer_listen_intval;
1918     cmd->peer_ht_caps = param->peer_ht_caps;
1919     cmd->peer_max_mpdu = param->peer_max_mpdu;
1920     cmd->peer_mpdu_density = param->peer_mpdu_density;
1921     cmd->peer_vht_caps = param->peer_vht_caps;
1922     cmd->peer_phymode = param->peer_phymode;
1923 
1924     /* Update 11ax capabilities */
1925     cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
1926     cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
1927     cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
1928     cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
1929     cmd->peer_he_ops = param->peer_he_ops;
1930     memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
1931            sizeof(param->peer_he_cap_phyinfo));
1932     memcpy(&cmd->peer_ppet, &param->peer_ppet,
1933            sizeof(param->peer_ppet));
1934 
1935     /* Update peer legacy rate information */
1936     ptr += sizeof(*cmd);
1937 
1938     tlv = ptr;
1939     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
1940               FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
1941 
1942     ptr += TLV_HDR_SIZE;
1943 
1944     cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
1945     memcpy(ptr, param->peer_legacy_rates.rates,
1946            param->peer_legacy_rates.num_rates);
1947 
1948     /* Update peer HT rate information */
1949     ptr += peer_legacy_rates_align;
1950 
1951     tlv = ptr;
1952     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
1953               FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
1954     ptr += TLV_HDR_SIZE;
1955     cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
1956     memcpy(ptr, param->peer_ht_rates.rates,
1957            param->peer_ht_rates.num_rates);
1958 
1959     /* VHT Rates */
1960     ptr += peer_ht_rates_align;
1961 
1962     mcs = ptr;
1963 
1964     mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
1965               FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
1966 
1967     cmd->peer_nss = param->peer_nss;
1968 
1969     /* Update bandwidth-NSS mapping */
1970     cmd->peer_bw_rxnss_override = 0;
1971     cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
1972 
1973     if (param->vht_capable) {
1974         mcs->rx_max_rate = param->rx_max_rate;
1975         mcs->rx_mcs_set = param->rx_mcs_set;
1976         mcs->tx_max_rate = param->tx_max_rate;
1977         mcs->tx_mcs_set = param->tx_mcs_set;
1978     }
1979 
1980     /* HE Rates */
1981     cmd->peer_he_mcs = param->peer_he_mcs_count;
1982     cmd->min_data_rate = param->min_data_rate;
1983 
1984     ptr += sizeof(*mcs);
1985 
1986     len = param->peer_he_mcs_count * sizeof(*he_mcs);
1987 
1988     tlv = ptr;
1989     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
1990               FIELD_PREP(WMI_TLV_LEN, len);
1991     ptr += TLV_HDR_SIZE;
1992 
1993     /* Loop through the HE rate set */
1994     for (i = 0; i < param->peer_he_mcs_count; i++) {
1995         he_mcs = ptr;
1996         he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
1997                         WMI_TAG_HE_RATE_SET) |
1998                      FIELD_PREP(WMI_TLV_LEN,
1999                         sizeof(*he_mcs) - TLV_HDR_SIZE);
2000 
2001         he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
2002         he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
2003         ptr += sizeof(*he_mcs);
2004     }
2005 
2006     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2007     if (ret) {
2008         ath11k_warn(ar->ab,
2009                 "failed to send WMI_PEER_ASSOC_CMDID\n");
2010         dev_kfree_skb(skb);
2011     }
2012 
2013     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2014            "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
2015            cmd->vdev_id, cmd->peer_associd, param->peer_mac,
2016            cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2017            cmd->peer_listen_intval, cmd->peer_ht_caps,
2018            cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2019            cmd->peer_mpdu_density,
2020            cmd->peer_vht_caps, cmd->peer_he_cap_info,
2021            cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2022            cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2023            cmd->peer_he_cap_phy[2],
2024            cmd->peer_bw_rxnss_override);
2025 
2026     return ret;
2027 }
2028 
2029 void ath11k_wmi_start_scan_init(struct ath11k *ar,
2030                 struct scan_req_params *arg)
2031 {
2032     /* setup commonly used values */
2033     arg->scan_req_id = 1;
2034     if (ar->state_11d == ATH11K_11D_PREPARING)
2035         arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2036     else
2037         arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2038     arg->dwell_time_active = 50;
2039     arg->dwell_time_active_2g = 0;
2040     arg->dwell_time_passive = 150;
2041     arg->dwell_time_active_6g = 40;
2042     arg->dwell_time_passive_6g = 30;
2043     arg->min_rest_time = 50;
2044     arg->max_rest_time = 500;
2045     arg->repeat_probe_time = 0;
2046     arg->probe_spacing_time = 0;
2047     arg->idle_time = 0;
2048     arg->max_scan_time = 20000;
2049     arg->probe_delay = 5;
2050     arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2051                   WMI_SCAN_EVENT_COMPLETED |
2052                   WMI_SCAN_EVENT_BSS_CHANNEL |
2053                   WMI_SCAN_EVENT_FOREIGN_CHAN |
2054                   WMI_SCAN_EVENT_DEQUEUED;
2055     arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
2056     arg->num_bssid = 1;
2057 
2058     /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
2059      * ZEROs in probe request
2060      */
2061     eth_broadcast_addr(arg->bssid_list[0].addr);
2062 }
2063 
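/* A minimal usage sketch, not part of the driver: the intended sequence around
 * ath11k_wmi_start_scan_init() above is to seed the request with the common
 * defaults, override what the specific scan needs (scan_id, vdev_id, channel
 * and SSID lists, extra IEs) and then hand it to
 * ath11k_wmi_send_scan_start_cmd(). The hard-coded scan_id below is a
 * placeholder.
 */
static int __maybe_unused
ath11k_wmi_example_start_scan(struct ath11k *ar, u32 vdev_id)
{
	struct scan_req_params arg = {};

	ath11k_wmi_start_scan_init(ar, &arg);
	arg.vdev_id = vdev_id;
	arg.scan_id = 1;	/* placeholder; real callers use their scan id */
	/* a real caller also fills num_chan/chan_list and num_ssids/ssid[]
	 * from the mac80211 scan request before sending
	 */
	return ath11k_wmi_send_scan_start_cmd(ar, &arg);
}
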
2064 static inline void
2065 ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2066                        struct scan_req_params *param)
2067 {
2068     /* Scan events subscription */
2069     if (param->scan_ev_started)
2070         cmd->notify_scan_events |=  WMI_SCAN_EVENT_STARTED;
2071     if (param->scan_ev_completed)
2072         cmd->notify_scan_events |=  WMI_SCAN_EVENT_COMPLETED;
2073     if (param->scan_ev_bss_chan)
2074         cmd->notify_scan_events |=  WMI_SCAN_EVENT_BSS_CHANNEL;
2075     if (param->scan_ev_foreign_chan)
2076         cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN;
2077     if (param->scan_ev_dequeued)
2078         cmd->notify_scan_events |=  WMI_SCAN_EVENT_DEQUEUED;
2079     if (param->scan_ev_preempted)
2080         cmd->notify_scan_events |=  WMI_SCAN_EVENT_PREEMPTED;
2081     if (param->scan_ev_start_failed)
2082         cmd->notify_scan_events |=  WMI_SCAN_EVENT_START_FAILED;
2083     if (param->scan_ev_restarted)
2084         cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESTARTED;
2085     if (param->scan_ev_foreign_chn_exit)
2086         cmd->notify_scan_events |=  WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
2087     if (param->scan_ev_suspended)
2088         cmd->notify_scan_events |=  WMI_SCAN_EVENT_SUSPENDED;
2089     if (param->scan_ev_resumed)
2090         cmd->notify_scan_events |=  WMI_SCAN_EVENT_RESUMED;
2091 
2092     /** Set scan control flags */
2093     cmd->scan_ctrl_flags = 0;
2094     if (param->scan_f_passive)
2095         cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_PASSIVE;
2096     if (param->scan_f_strict_passive_pch)
2097         cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
2098     if (param->scan_f_promisc_mode)
2099         cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROMISCUOS;
2100     if (param->scan_f_capture_phy_err)
2101         cmd->scan_ctrl_flags |=  WMI_SCAN_CAPTURE_PHY_ERROR;
2102     if (param->scan_f_half_rate)
2103         cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
2104     if (param->scan_f_quarter_rate)
2105         cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
2106     if (param->scan_f_cck_rates)
2107         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_CCK_RATES;
2108     if (param->scan_f_ofdm_rates)
2109         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_OFDM_RATES;
2110     if (param->scan_f_chan_stat_evnt)
2111         cmd->scan_ctrl_flags |=  WMI_SCAN_CHAN_STAT_EVENT;
2112     if (param->scan_f_filter_prb_req)
2113         cmd->scan_ctrl_flags |=  WMI_SCAN_FILTER_PROBE_REQ;
2114     if (param->scan_f_bcast_probe)
2115         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_BCAST_PROBE_REQ;
2116     if (param->scan_f_offchan_mgmt_tx)
2117         cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_MGMT_TX;
2118     if (param->scan_f_offchan_data_tx)
2119         cmd->scan_ctrl_flags |=  WMI_SCAN_OFFCHAN_DATA_TX;
2120     if (param->scan_f_force_active_dfs_chn)
2121         cmd->scan_ctrl_flags |=  WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
2122     if (param->scan_f_add_tpc_ie_in_probe)
2123         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
2124     if (param->scan_f_add_ds_ie_in_probe)
2125         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
2126     if (param->scan_f_add_spoofed_mac_in_probe)
2127         cmd->scan_ctrl_flags |=  WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
2128     if (param->scan_f_add_rand_seq_in_probe)
2129         cmd->scan_ctrl_flags |=  WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
2130     if (param->scan_f_en_ie_whitelist_in_probe)
2131         cmd->scan_ctrl_flags |=
2132              WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
2133 
2134     /* for adaptive scan mode using 3 bits (21 - 23 bits) */
2135     WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
2136                 param->adaptive_dwell_time_mode);
2137 }
2138 
2139 int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
2140                    struct scan_req_params *params)
2141 {
2142     struct ath11k_pdev_wmi *wmi = ar->wmi;
2143     struct wmi_start_scan_cmd *cmd;
2144     struct wmi_ssid *ssid = NULL;
2145     struct wmi_mac_addr *bssid;
2146     struct sk_buff *skb;
2147     struct wmi_tlv *tlv;
2148     void *ptr;
2149     int i, ret, len;
2150     u32 *tmp_ptr;
2151     u16 extraie_len_with_pad = 0;
2152     struct hint_short_ssid *s_ssid = NULL;
2153     struct hint_bssid *hint_bssid = NULL;
2154 
2155     len = sizeof(*cmd);
2156 
2157     len += TLV_HDR_SIZE;
2158     if (params->num_chan)
2159         len += params->num_chan * sizeof(u32);
2160 
2161     len += TLV_HDR_SIZE;
2162     if (params->num_ssids)
2163         len += params->num_ssids * sizeof(*ssid);
2164 
2165     len += TLV_HDR_SIZE;
2166     if (params->num_bssid)
2167         len += sizeof(*bssid) * params->num_bssid;
2168 
2169     len += TLV_HDR_SIZE;
2170     if (params->extraie.len && params->extraie.len <= 0xFFFF)
2171         extraie_len_with_pad =
2172             roundup(params->extraie.len, sizeof(u32));
2173     len += extraie_len_with_pad;
2174 
2175     if (params->num_hint_bssid)
2176         len += TLV_HDR_SIZE +
2177                params->num_hint_bssid * sizeof(struct hint_bssid);
2178 
2179     if (params->num_hint_s_ssid)
2180         len += TLV_HDR_SIZE +
2181                params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
2182 
2183     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
2184     if (!skb)
2185         return -ENOMEM;
2186 
2187     ptr = skb->data;
2188 
2189     cmd = ptr;
2190     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
2191               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2192 
2193     cmd->scan_id = params->scan_id;
2194     cmd->scan_req_id = params->scan_req_id;
2195     cmd->vdev_id = params->vdev_id;
2196     cmd->scan_priority = params->scan_priority;
2197     cmd->notify_scan_events = params->notify_scan_events;
2198 
2199     ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
2200 
2201     cmd->dwell_time_active = params->dwell_time_active;
2202     cmd->dwell_time_active_2g = params->dwell_time_active_2g;
2203     cmd->dwell_time_passive = params->dwell_time_passive;
2204     cmd->dwell_time_active_6g = params->dwell_time_active_6g;
2205     cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
2206     cmd->min_rest_time = params->min_rest_time;
2207     cmd->max_rest_time = params->max_rest_time;
2208     cmd->repeat_probe_time = params->repeat_probe_time;
2209     cmd->probe_spacing_time = params->probe_spacing_time;
2210     cmd->idle_time = params->idle_time;
2211     cmd->max_scan_time = params->max_scan_time;
2212     cmd->probe_delay = params->probe_delay;
2213     cmd->burst_duration = params->burst_duration;
2214     cmd->num_chan = params->num_chan;
2215     cmd->num_bssid = params->num_bssid;
2216     cmd->num_ssids = params->num_ssids;
2217     cmd->ie_len = params->extraie.len;
2218     cmd->n_probes = params->n_probes;
2219     ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
2220     ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
2221 
2222     ptr += sizeof(*cmd);
2223 
2224     len = params->num_chan * sizeof(u32);
2225 
2226     tlv = ptr;
2227     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
2228               FIELD_PREP(WMI_TLV_LEN, len);
2229     ptr += TLV_HDR_SIZE;
2230     tmp_ptr = (u32 *)ptr;
2231 
2232     for (i = 0; i < params->num_chan; ++i)
2233         tmp_ptr[i] = params->chan_list[i];
2234 
2235     ptr += len;
2236 
2237     len = params->num_ssids * sizeof(*ssid);
2238     tlv = ptr;
2239     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
2240               FIELD_PREP(WMI_TLV_LEN, len);
2241 
2242     ptr += TLV_HDR_SIZE;
2243 
2244     if (params->num_ssids) {
2245         ssid = ptr;
2246         for (i = 0; i < params->num_ssids; ++i) {
2247             ssid->ssid_len = params->ssid[i].length;
2248             memcpy(ssid->ssid, params->ssid[i].ssid,
2249                    params->ssid[i].length);
2250             ssid++;
2251         }
2252     }
2253 
2254     ptr += (params->num_ssids * sizeof(*ssid));
2255     len = params->num_bssid * sizeof(*bssid);
2256     tlv = ptr;
2257     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
2258               FIELD_PREP(WMI_TLV_LEN, len);
2259 
2260     ptr += TLV_HDR_SIZE;
2261     bssid = ptr;
2262 
2263     if (params->num_bssid) {
2264         for (i = 0; i < params->num_bssid; ++i) {
2265             ether_addr_copy(bssid->addr,
2266                     params->bssid_list[i].addr);
2267             bssid++;
2268         }
2269     }
2270 
2271     ptr += params->num_bssid * sizeof(*bssid);
2272 
2273     len = extraie_len_with_pad;
2274     tlv = ptr;
2275     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
2276               FIELD_PREP(WMI_TLV_LEN, len);
2277     ptr += TLV_HDR_SIZE;
2278 
2279     if (extraie_len_with_pad)
2280         memcpy(ptr, params->extraie.ptr,
2281                params->extraie.len);
2282 
2283     ptr += extraie_len_with_pad;
2284 
2285     if (params->num_hint_s_ssid) {
2286         len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
2287         tlv = ptr;
2288         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
2289                   FIELD_PREP(WMI_TLV_LEN, len);
2290         ptr += TLV_HDR_SIZE;
2291         s_ssid = ptr;
2292         for (i = 0; i < params->num_hint_s_ssid; ++i) {
2293             s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
2294             s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
2295             s_ssid++;
2296         }
2297         ptr += len;
2298     }
2299 
2300     if (params->num_hint_bssid) {
2301         len = params->num_hint_bssid * sizeof(struct hint_bssid);
2302         tlv = ptr;
2303         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
2304                   FIELD_PREP(WMI_TLV_LEN, len);
2305         ptr += TLV_HDR_SIZE;
2306         hint_bssid = ptr;
2307         for (i = 0; i < params->num_hint_bssid; ++i) {
2308             hint_bssid->freq_flags =
2309                 params->hint_bssid[i].freq_flags;
2310             ether_addr_copy(&hint_bssid->bssid.addr[0],
2311                     &params->hint_bssid[i].bssid.addr[0]);
2312             hint_bssid++;
2313         }
2314     }
2315 
2316     ret = ath11k_wmi_cmd_send(wmi, skb,
2317                   WMI_START_SCAN_CMDID);
2318     if (ret) {
2319         ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2320         dev_kfree_skb(skb);
2321     }
2322 
2323     return ret;
2324 }
2325 
2326 int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
2327                   struct scan_cancel_param *param)
2328 {
2329     struct ath11k_pdev_wmi *wmi = ar->wmi;
2330     struct wmi_stop_scan_cmd *cmd;
2331     struct sk_buff *skb;
2332     int ret;
2333 
2334     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2335     if (!skb)
2336         return -ENOMEM;
2337 
2338     cmd = (struct wmi_stop_scan_cmd *)skb->data;
2339 
2340     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
2341               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2342 
2343     cmd->vdev_id = param->vdev_id;
2344     cmd->requestor = param->requester;
2345     cmd->scan_id = param->scan_id;
2346     cmd->pdev_id = param->pdev_id;
2347     /* stop the scan with the corresponding scan_id */
2348     if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2349         /* Cancelling all scans */
2350         cmd->req_type =  WMI_SCAN_STOP_ALL;
2351     } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2352         /* Cancelling VAP scans */
2353         cmd->req_type =  WMI_SCN_STOP_VAP_ALL;
2354     } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2355         /* Cancelling specific scan */
2356         cmd->req_type =  WMI_SCAN_STOP_ONE;
2357     } else {
2358         ath11k_warn(ar->ab, "invalid scan cancel param %d\n",
2359                 param->req_type);
2360         dev_kfree_skb(skb);
2361         return -EINVAL;
2362     }
2363 
2364     ret = ath11k_wmi_cmd_send(wmi, skb,
2365                   WMI_STOP_SCAN_CMDID);
2366     if (ret) {
2367         ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2368         dev_kfree_skb(skb);
2369     }
2370 
2371     return ret;
2372 }
2373 
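/* A minimal usage sketch, not part of the driver: cancels every scan running
 * on one vdev through ath11k_wmi_send_scan_stop_cmd() above, using the
 * WLAN_SCAN_CANCEL_VDEV_ALL request type that the function already handles.
 * Field names follow the usage in that function.
 */
static int __maybe_unused
ath11k_wmi_example_cancel_vdev_scans(struct ath11k *ar, u32 vdev_id)
{
	struct scan_cancel_param arg = {
		.req_type = WLAN_SCAN_CANCEL_VDEV_ALL,
		.vdev_id = vdev_id,
		.pdev_id = ar->pdev->pdev_id,
	};

	return ath11k_wmi_send_scan_stop_cmd(ar, &arg);
}
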
2374 int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
2375                        struct scan_chan_list_params *chan_list)
2376 {
2377     struct ath11k_pdev_wmi *wmi = ar->wmi;
2378     struct wmi_scan_chan_list_cmd *cmd;
2379     struct sk_buff *skb;
2380     struct wmi_channel *chan_info;
2381     struct channel_param *tchan_info;
2382     struct wmi_tlv *tlv;
2383     void *ptr;
2384     int i, ret, len;
2385     u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2386     u32 *reg1, *reg2;
2387 
2388     tchan_info = chan_list->ch_param;
2389     while (chan_list->nallchans) {
2390         len = sizeof(*cmd) + TLV_HDR_SIZE;
2391         max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2392             sizeof(*chan_info);
2393 
2394         if (chan_list->nallchans > max_chan_limit)
2395             num_send_chans = max_chan_limit;
2396         else
2397             num_send_chans = chan_list->nallchans;
2398 
2399         chan_list->nallchans -= num_send_chans;
2400         len += sizeof(*chan_info) * num_send_chans;
2401 
2402         skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
2403         if (!skb)
2404             return -ENOMEM;
2405 
2406         cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2407         cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
2408             FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2409         cmd->pdev_id = chan_list->pdev_id;
2410         cmd->num_scan_chans = num_send_chans;
2411         if (num_sends)
2412             cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
2413 
2414         ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2415                "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2416                num_send_chans, len, cmd->pdev_id, num_sends);
2417 
2418         ptr = skb->data + sizeof(*cmd);
2419 
2420         len = sizeof(*chan_info) * num_send_chans;
2421         tlv = ptr;
2422         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
2423                   FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
2424         ptr += TLV_HDR_SIZE;
2425 
2426         for (i = 0; i < num_send_chans; ++i) {
2427             chan_info = ptr;
2428             memset(chan_info, 0, sizeof(*chan_info));
2429             len = sizeof(*chan_info);
2430             chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
2431                                WMI_TAG_CHANNEL) |
2432                         FIELD_PREP(WMI_TLV_LEN,
2433                                len - TLV_HDR_SIZE);
2434 
2435             reg1 = &chan_info->reg_info_1;
2436             reg2 = &chan_info->reg_info_2;
2437             chan_info->mhz = tchan_info->mhz;
2438             chan_info->band_center_freq1 = tchan_info->cfreq1;
2439             chan_info->band_center_freq2 = tchan_info->cfreq2;
2440 
2441             if (tchan_info->is_chan_passive)
2442                 chan_info->info |= WMI_CHAN_INFO_PASSIVE;
2443             if (tchan_info->allow_he)
2444                 chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
2445             else if (tchan_info->allow_vht)
2446                 chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
2447             else if (tchan_info->allow_ht)
2448                 chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
2449             if (tchan_info->half_rate)
2450                 chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
2451             if (tchan_info->quarter_rate)
2452                 chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
2453             if (tchan_info->psc_channel)
2454                 chan_info->info |= WMI_CHAN_INFO_PSC;
2455             if (tchan_info->dfs_set)
2456                 chan_info->info |= WMI_CHAN_INFO_DFS;
2457 
2458             chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
2459                               tchan_info->phy_mode);
2460             *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
2461                         tchan_info->minpower);
2462             *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
2463                         tchan_info->maxpower);
2464             *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
2465                         tchan_info->maxregpower);
2466             *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
2467                         tchan_info->reg_class_id);
2468             *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
2469                         tchan_info->antennamax);
2470             *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
2471                         tchan_info->maxregpower);
2472 
2473             ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2474                    "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2475                    i, chan_info->mhz, chan_info->info);
2476 
2477             ptr += sizeof(*chan_info);
2478 
2479             tchan_info++;
2480         }
2481 
2482         ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2483         if (ret) {
2484             ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2485             dev_kfree_skb(skb);
2486             return ret;
2487         }
2488 
2489         num_sends++;
2490     }
2491 
2492     return 0;
2493 }
2494 
2495 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
2496                        struct wmi_wmm_params_all_arg *param)
2497 {
2498     struct ath11k_pdev_wmi *wmi = ar->wmi;
2499     struct wmi_vdev_set_wmm_params_cmd *cmd;
2500     struct wmi_wmm_params *wmm_param;
2501     struct wmi_wmm_params_arg *wmi_wmm_arg;
2502     struct sk_buff *skb;
2503     int ret, ac;
2504 
2505     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2506     if (!skb)
2507         return -ENOMEM;
2508 
2509     cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2510     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
2511                      WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
2512               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2513 
2514     cmd->vdev_id = vdev_id;
2515     cmd->wmm_param_type = 0;
2516 
2517     for (ac = 0; ac < WME_NUM_AC; ac++) {
2518         switch (ac) {
2519         case WME_AC_BE:
2520             wmi_wmm_arg = &param->ac_be;
2521             break;
2522         case WME_AC_BK:
2523             wmi_wmm_arg = &param->ac_bk;
2524             break;
2525         case WME_AC_VI:
2526             wmi_wmm_arg = &param->ac_vi;
2527             break;
2528         case WME_AC_VO:
2529             wmi_wmm_arg = &param->ac_vo;
2530             break;
2531         }
2532 
2533         wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2534         wmm_param->tlv_header =
2535                 FIELD_PREP(WMI_TLV_TAG,
2536                        WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
2537                 FIELD_PREP(WMI_TLV_LEN,
2538                        sizeof(*wmm_param) - TLV_HDR_SIZE);
2539 
2540         wmm_param->aifs = wmi_wmm_arg->aifs;
2541         wmm_param->cwmin = wmi_wmm_arg->cwmin;
2542         wmm_param->cwmax = wmi_wmm_arg->cwmax;
2543         wmm_param->txoplimit = wmi_wmm_arg->txop;
2544         wmm_param->acm = wmi_wmm_arg->acm;
2545         wmm_param->no_ack = wmi_wmm_arg->no_ack;
2546 
2547         ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2548                "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2549                ac, wmm_param->aifs, wmm_param->cwmin,
2550                wmm_param->cwmax, wmm_param->txoplimit,
2551                wmm_param->acm, wmm_param->no_ack);
2552     }
2553     ret = ath11k_wmi_cmd_send(wmi, skb,
2554                   WMI_VDEV_SET_WMM_PARAMS_CMDID);
2555     if (ret) {
2556         ath11k_warn(ar->ab,
2557                 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2558         dev_kfree_skb(skb);
2559     }
2560 
2561     return ret;
2562 }
2563 
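/* A minimal usage sketch, not part of the driver: shows how a caller might
 * build the per-AC argument block for ath11k_wmi_send_wmm_update_cmd_tlv()
 * above. Only the best-effort AC is filled here, with roughly the 802.11
 * default EDCA numbers as placeholders; a real caller fills ac_bk/ac_vi/ac_vo
 * the same way from mac80211's TX queue parameters.
 */
static int __maybe_unused
ath11k_wmi_example_set_wmm(struct ath11k *ar, u32 vdev_id)
{
	struct wmi_wmm_params_all_arg arg = {};

	arg.ac_be.aifs = 3;	/* placeholder: default BE AIFSN */
	arg.ac_be.cwmin = 15;	/* placeholder: default BE CWmin */
	arg.ac_be.cwmax = 1023;	/* placeholder: default BE CWmax */
	arg.ac_be.txop = 0;
	arg.ac_be.acm = 0;
	arg.ac_be.no_ack = 0;

	return ath11k_wmi_send_wmm_update_cmd_tlv(ar, vdev_id, &arg);
}
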
2564 int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
2565                           u32 pdev_id)
2566 {
2567     struct ath11k_pdev_wmi *wmi = ar->wmi;
2568     struct wmi_dfs_phyerr_offload_cmd *cmd;
2569     struct sk_buff *skb;
2570     int ret;
2571 
2572     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2573     if (!skb)
2574         return -ENOMEM;
2575 
2576     cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2577     cmd->tlv_header =
2578         FIELD_PREP(WMI_TLV_TAG,
2579                WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
2580         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2581 
2582     cmd->pdev_id = pdev_id;
2583 
2584     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2585            "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2586 
2587     ret = ath11k_wmi_cmd_send(wmi, skb,
2588                   WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2589     if (ret) {
2590         ath11k_warn(ar->ab,
2591                 "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2592         dev_kfree_skb(skb);
2593     }
2594 
2595     return ret;
2596 }
2597 
2598 int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
2599               u32 tid, u32 initiator, u32 reason)
2600 {
2601     struct ath11k_pdev_wmi *wmi = ar->wmi;
2602     struct wmi_delba_send_cmd *cmd;
2603     struct sk_buff *skb;
2604     int ret;
2605 
2606     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2607     if (!skb)
2608         return -ENOMEM;
2609 
2610     cmd = (struct wmi_delba_send_cmd *)skb->data;
2611     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
2612             FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2613     cmd->vdev_id = vdev_id;
2614     ether_addr_copy(cmd->peer_macaddr.addr, mac);
2615     cmd->tid = tid;
2616     cmd->initiator = initiator;
2617     cmd->reasoncode = reason;
2618 
2619     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2620            "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2621            vdev_id, mac, tid, initiator, reason);
2622 
2623     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
2624 
2625     if (ret) {
2626         ath11k_warn(ar->ab,
2627                 "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2628         dev_kfree_skb(skb);
2629     }
2630 
2631     return ret;
2632 }
2633 
2634 int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
2635                   u32 tid, u32 status)
2636 {
2637     struct ath11k_pdev_wmi *wmi = ar->wmi;
2638     struct wmi_addba_setresponse_cmd *cmd;
2639     struct sk_buff *skb;
2640     int ret;
2641 
2642     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2643     if (!skb)
2644         return -ENOMEM;
2645 
2646     cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
2647     cmd->tlv_header =
2648         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
2649         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2650     cmd->vdev_id = vdev_id;
2651     ether_addr_copy(cmd->peer_macaddr.addr, mac);
2652     cmd->tid = tid;
2653     cmd->statuscode = status;
2654 
2655     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2656            "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2657            vdev_id, mac, tid, status);
2658 
2659     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
2660 
2661     if (ret) {
2662         ath11k_warn(ar->ab,
2663                 "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2664         dev_kfree_skb(skb);
2665     }
2666 
2667     return ret;
2668 }
2669 
2670 int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
2671               u32 tid, u32 buf_size)
2672 {
2673     struct ath11k_pdev_wmi *wmi = ar->wmi;
2674     struct wmi_addba_send_cmd *cmd;
2675     struct sk_buff *skb;
2676     int ret;
2677 
2678     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2679     if (!skb)
2680         return -ENOMEM;
2681 
2682     cmd = (struct wmi_addba_send_cmd *)skb->data;
2683     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
2684         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2685     cmd->vdev_id = vdev_id;
2686     ether_addr_copy(cmd->peer_macaddr.addr, mac);
2687     cmd->tid = tid;
2688     cmd->buffersize = buf_size;
2689 
2690     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2691            "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
2692            vdev_id, mac, tid, buf_size);
2693 
2694     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
2695 
2696     if (ret) {
2697         ath11k_warn(ar->ab,
2698                 "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
2699         dev_kfree_skb(skb);
2700     }
2701 
2702     return ret;
2703 }
2704 
2705 int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
2706 {
2707     struct ath11k_pdev_wmi *wmi = ar->wmi;
2708     struct wmi_addba_clear_resp_cmd *cmd;
2709     struct sk_buff *skb;
2710     int ret;
2711 
2712     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2713     if (!skb)
2714         return -ENOMEM;
2715 
2716     cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
2717     cmd->tlv_header =
2718         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
2719         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2720     cmd->vdev_id = vdev_id;
2721     ether_addr_copy(cmd->peer_macaddr.addr, mac);
2722 
2723     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2724            "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
2725            vdev_id, mac);
2726 
2727     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
2728 
2729     if (ret) {
2730         ath11k_warn(ar->ab,
2731                 "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
2732         dev_kfree_skb(skb);
2733     }
2734 
2735     return ret;
2736 }
2737 
2738 int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
2739 {
2740     struct ath11k_pdev_wmi *wmi = ar->wmi;
2741     struct wmi_pdev_pktlog_filter_cmd *cmd;
2742     struct wmi_pdev_pktlog_filter_info *info;
2743     struct sk_buff *skb;
2744     struct wmi_tlv *tlv;
2745     void *ptr;
2746     int ret, len;
2747 
2748     len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
2749     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
2750     if (!skb)
2751         return -ENOMEM;
2752 
2753     cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
2754 
2755     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
2756               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2757 
2758     cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
2759     cmd->num_mac = 1;
2760     cmd->enable = enable;
2761 
2762     ptr = skb->data + sizeof(*cmd);
2763 
2764     tlv = ptr;
2765     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
2766               FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
2767 
2768     ptr += TLV_HDR_SIZE;
2769     info = ptr;
2770 
2771     ether_addr_copy(info->peer_macaddr.addr, addr);
2772     info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
2773                FIELD_PREP(WMI_TLV_LEN,
2774                       sizeof(*info) - TLV_HDR_SIZE);
2775 
2776     ret = ath11k_wmi_cmd_send(wmi, skb,
2777                   WMI_PDEV_PKTLOG_FILTER_CMDID);
2778     if (ret) {
2779         ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_FILTER_CMDID\n");
2780         dev_kfree_skb(skb);
2781     }
2782 
2783     return ret;
2784 }
2785 
2786 int
2787 ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
2788                  struct wmi_init_country_params init_cc_params)
2789 {
2790     struct ath11k_pdev_wmi *wmi = ar->wmi;
2791     struct wmi_init_country_cmd *cmd;
2792     struct sk_buff *skb;
2793     int ret;
2794 
2795     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2796     if (!skb)
2797         return -ENOMEM;
2798 
2799     cmd = (struct wmi_init_country_cmd *)skb->data;
2800     cmd->tlv_header =
2801         FIELD_PREP(WMI_TLV_TAG,
2802                WMI_TAG_SET_INIT_COUNTRY_CMD) |
2803         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2804 
2805     cmd->pdev_id = ar->pdev->pdev_id;
2806 
2807     switch (init_cc_params.flags) {
2808     case ALPHA_IS_SET:
2809         cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
2810         memcpy((u8 *)&cmd->cc_info.alpha2,
2811                init_cc_params.cc_info.alpha2, 3);
2812         break;
2813     case CC_IS_SET:
2814         cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
2815         cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
2816         break;
2817     case REGDMN_IS_SET:
2818         cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
2819         cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
2820         break;
2821     default:
2822         ret = -EINVAL;
2823         goto out;
2824     }
2825 
2826     ret = ath11k_wmi_cmd_send(wmi, skb,
2827                   WMI_SET_INIT_COUNTRY_CMDID);
2828 
2829 out:
2830     if (ret) {
2831         ath11k_warn(ar->ab,
2832                 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
2833                 ret);
2834         dev_kfree_skb(skb);
2835     }
2836 
2837     return ret;
2838 }
2839 
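/* A minimal usage sketch, not part of the driver: programs a regulatory
 * country by its ISO alpha2 code through ath11k_wmi_send_init_country_cmd()
 * above, using the ALPHA_IS_SET flag that the function already handles. Note
 * that the params struct is passed by value and that the function copies
 * three bytes of alpha2, so the third byte is cleared here.
 */
static int __maybe_unused
ath11k_wmi_example_set_country(struct ath11k *ar, const char *alpha2)
{
	struct wmi_init_country_params init_country_param = {
		.flags = ALPHA_IS_SET,
	};

	memcpy(&init_country_param.cc_info.alpha2, alpha2, 2);
	init_country_param.cc_info.alpha2[2] = 0;

	return ath11k_wmi_send_init_country_cmd(ar, init_country_param);
}
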
2840 int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
2841                         struct wmi_set_current_country_params *param)
2842 {
2843     struct ath11k_pdev_wmi *wmi = ar->wmi;
2844     struct wmi_set_current_country_cmd *cmd;
2845     struct sk_buff *skb;
2846     int ret;
2847 
2848     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2849     if (!skb)
2850         return -ENOMEM;
2851 
2852     cmd = (struct wmi_set_current_country_cmd *)skb->data;
2853     cmd->tlv_header =
2854         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
2855         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2856 
2857     cmd->pdev_id = ar->pdev->pdev_id;
2858     memcpy(&cmd->new_alpha2, &param->alpha2, 3);
2859     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
2860 
2861     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2862            "set current country pdev id %d alpha2 %c%c\n",
2863            ar->pdev->pdev_id,
2864            param->alpha2[0],
2865            param->alpha2[1]);
2866 
2867     if (ret) {
2868         ath11k_warn(ar->ab,
2869                 "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
2870         dev_kfree_skb(skb);
2871     }
2872 
2873     return ret;
2874 }
2875 
2876 int
2877 ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
2878                          struct thermal_mitigation_params *param)
2879 {
2880     struct ath11k_pdev_wmi *wmi = ar->wmi;
2881     struct wmi_therm_throt_config_request_cmd *cmd;
2882     struct wmi_therm_throt_level_config_info *lvl_conf;
2883     struct wmi_tlv *tlv;
2884     struct sk_buff *skb;
2885     int i, ret, len;
2886 
2887     len = sizeof(*cmd) + TLV_HDR_SIZE +
2888           THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
2889 
2890     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
2891     if (!skb)
2892         return -ENOMEM;
2893 
2894     cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
2895 
2896     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
2897               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2898 
2899     cmd->pdev_id = ar->pdev->pdev_id;
2900     cmd->enable = param->enable;
2901     cmd->dc = param->dc;
2902     cmd->dc_per_event = param->dc_per_event;
2903     cmd->therm_throt_levels = THERMAL_LEVELS;
2904 
2905     tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
2906     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
2907               FIELD_PREP(WMI_TLV_LEN,
2908                  (THERMAL_LEVELS *
2909                   sizeof(struct wmi_therm_throt_level_config_info)));
2910 
2911     lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
2912                                 sizeof(*cmd) +
2913                                 TLV_HDR_SIZE);
2914     for (i = 0; i < THERMAL_LEVELS; i++) {
2915         lvl_conf->tlv_header =
2916             FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
2917             FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
2918 
2919         lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
2920         lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
2921         lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
2922         lvl_conf->prio = param->levelconf[i].priority;
2923         lvl_conf++;
2924     }
2925 
2926     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
2927     if (ret) {
2928         ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
2929         dev_kfree_skb(skb);
2930     }
2931 
2932     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2933            "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
2934            ar->pdev->pdev_id, param->enable, param->dc,
2935            param->dc_per_event, THERMAL_LEVELS);
2936 
2937     return ret;
2938 }
2939 
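/* A minimal usage sketch, not part of the driver: shows the argument block
 * consumed by ath11k_wmi_send_thermal_mitigation_param_cmd() above, i.e. one
 * global duty-cycle setting plus THERMAL_LEVELS level entries. All numeric
 * values below are placeholders, not the driver's real thermal table.
 */
static int __maybe_unused
ath11k_wmi_example_thermal_throttle(struct ath11k *ar, bool enable)
{
	struct thermal_mitigation_params param = {
		.enable = enable ? 1 : 0,
		.dc = 100,		/* placeholder duty cycle */
		.dc_per_event = 0xFFFFFFFF,
	};
	int i;

	for (i = 0; i < THERMAL_LEVELS; i++) {
		param.levelconf[i].tmplwm = 0;		/* placeholder low watermark */
		param.levelconf[i].tmphwm = 150;	/* placeholder high watermark */
		param.levelconf[i].dcoffpercent = 0;	/* placeholder: no throttling */
		param.levelconf[i].priority = 0;
	}

	return ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
}
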
2940 int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
2941                        struct wmi_11d_scan_start_params *param)
2942 {
2943     struct ath11k_pdev_wmi *wmi = ar->wmi;
2944     struct wmi_11d_scan_start_cmd *cmd;
2945     struct sk_buff *skb;
2946     int ret;
2947 
2948     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2949     if (!skb)
2950         return -ENOMEM;
2951 
2952     cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
2953     cmd->tlv_header =
2954         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
2955         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2956 
2957     cmd->vdev_id = param->vdev_id;
2958     cmd->scan_period_msec = param->scan_period_msec;
2959     cmd->start_interval_msec = param->start_interval_msec;
2960     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
2961 
2962     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2963            "send 11d scan start vdev id %d period %d ms internal %d ms\n",
2964            cmd->vdev_id,
2965            cmd->scan_period_msec,
2966            cmd->start_interval_msec);
2967 
2968     if (ret) {
2969         ath11k_warn(ar->ab,
2970                 "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
2971         dev_kfree_skb(skb);
2972     }
2973 
2974     return ret;
2975 }
2976 
2977 int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
2978 {
2979     struct ath11k_pdev_wmi *wmi = ar->wmi;
2980     struct wmi_11d_scan_stop_cmd *cmd;
2981     struct sk_buff *skb;
2982     int ret;
2983 
2984     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2985     if (!skb)
2986         return -ENOMEM;
2987 
2988     cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
2989     cmd->tlv_header =
2990         FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
2991         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
2992 
2993     cmd->vdev_id = vdev_id;
2994     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
2995 
2996     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
2997            "send 11d scan stop vdev id %d\n",
2998            cmd->vdev_id);
2999 
3000     if (ret) {
3001         ath11k_warn(ar->ab,
3002                 "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
3003         dev_kfree_skb(skb);
3004     }
3005 
3006     return ret;
3007 }
3008 
3009 int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
3010 {
3011     struct ath11k_pdev_wmi *wmi = ar->wmi;
3012     struct wmi_pktlog_enable_cmd *cmd;
3013     struct sk_buff *skb;
3014     int ret;
3015 
3016     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3017     if (!skb)
3018         return -ENOMEM;
3019 
3020     cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
3021 
3022     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
3023               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
3024 
3025     cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
3026     cmd->evlist = pktlog_filter;
3027     cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
3028 
3029     ret = ath11k_wmi_cmd_send(wmi, skb,
3030                   WMI_PDEV_PKTLOG_ENABLE_CMDID);
3031     if (ret) {
3032         ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
3033         dev_kfree_skb(skb);
3034     }
3035 
3036     return ret;
3037 }
3038 
3039 int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
3040 {
3041     struct ath11k_pdev_wmi *wmi = ar->wmi;
3042     struct wmi_pktlog_disable_cmd *cmd;
3043     struct sk_buff *skb;
3044     int ret;
3045 
3046     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3047     if (!skb)
3048         return -ENOMEM;
3049 
3050     cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
3051 
3052     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
3053               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
3054 
3055     cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
3056 
3057     ret = ath11k_wmi_cmd_send(wmi, skb,
3058                   WMI_PDEV_PKTLOG_DISABLE_CMDID);
3059     if (ret) {
3060         ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_DISABLE_CMDID\n");
3061         dev_kfree_skb(skb);
3062     }
3063 
3064     return ret;
3065 }
3066 
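     /* Enable Target Wake Time (TWT) on a pdev. All scheduler tuning
      * knobs (congestion/interference thresholds, slot sizes, STA limits,
      * check intervals) come from the ATH11K_TWT_DEF_* defaults; only
      * pdev_id comes from the caller. On success ar->twt_enabled is set
      * so that per-peer TWT dialogs can be negotiated later.
      */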
3067 int
3068 ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
3069 {
3070     struct ath11k_pdev_wmi *wmi = ar->wmi;
3071     struct ath11k_base *ab = wmi->wmi_ab->ab;
3072     struct wmi_twt_enable_params_cmd *cmd;
3073     struct sk_buff *skb;
3074     int ret, len;
3075 
3076     len = sizeof(*cmd);
3077 
3078     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3079     if (!skb)
3080         return -ENOMEM;
3081 
3082     cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
3083     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
3084               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3085     cmd->pdev_id = pdev_id;
3086     cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
3087     cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
3088     cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
3089     cmd->congestion_thresh_teardown =
3090         ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
3091     cmd->congestion_thresh_critical =
3092         ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
3093     cmd->interference_thresh_teardown =
3094         ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
3095     cmd->interference_thresh_setup =
3096         ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
3097     cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
3098     cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
3099     cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
3100     cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
3101     cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
3102     cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
3103     cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
3104     cmd->remove_sta_slot_interval =
3105         ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
3106     /* TODO add MBSSID support */
3107     cmd->mbss_support = 0;
3108 
3109     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
3110     if (ret) {
3111         ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
3112         dev_kfree_skb(skb);
3113     } else {
3114         ar->twt_enabled = 1;
3115     }
3116     return ret;
3117 }
3118 
3119 int
3120 ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
3121 {
3122     struct ath11k_pdev_wmi *wmi = ar->wmi;
3123     struct ath11k_base *ab = wmi->wmi_ab->ab;
3124     struct wmi_twt_disable_params_cmd *cmd;
3125     struct sk_buff *skb;
3126     int ret, len;
3127 
3128     len = sizeof(*cmd);
3129 
3130     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3131     if (!skb)
3132         return -ENOMEM;
3133 
3134     cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3135     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
3136               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3137     cmd->pdev_id = pdev_id;
3138 
3139     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
3140     if (ret) {
3141         ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
3142         dev_kfree_skb(skb);
3143     } else {
3144         ar->twt_enabled = 0;
3145     }
3146     return ret;
3147 }
3148 
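     /* Set up a TWT agreement (dialog) with a peer. Wake interval,
      * mantissa, wake duration and SP offset are copied from the caller,
      * and the broadcast/trigger/flow-type/protection flags are OR-ed
      * into cmd->flags on top of the requested twt_cmd.
      */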
3149 int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
3150                        struct wmi_twt_add_dialog_params *params)
3151 {
3152     struct ath11k_pdev_wmi *wmi = ar->wmi;
3153     struct ath11k_base *ab = wmi->wmi_ab->ab;
3154     struct wmi_twt_add_dialog_params_cmd *cmd;
3155     struct sk_buff *skb;
3156     int ret, len;
3157 
3158     len = sizeof(*cmd);
3159 
3160     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3161     if (!skb)
3162         return -ENOMEM;
3163 
3164     cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
3165     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
3166               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3167 
3168     cmd->vdev_id = params->vdev_id;
3169     ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
3170     cmd->dialog_id = params->dialog_id;
3171     cmd->wake_intvl_us = params->wake_intvl_us;
3172     cmd->wake_intvl_mantis = params->wake_intvl_mantis;
3173     cmd->wake_dura_us = params->wake_dura_us;
3174     cmd->sp_offset_us = params->sp_offset_us;
3175     cmd->flags = params->twt_cmd;
3176     if (params->flag_bcast)
3177         cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
3178     if (params->flag_trigger)
3179         cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
3180     if (params->flag_flow_type)
3181         cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
3182     if (params->flag_protection)
3183         cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
3184 
3185     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3186            "wmi add twt dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
3187            cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
3188            cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
3189            cmd->flags);
3190 
3191     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
3192 
3193     if (ret) {
3194         ath11k_warn(ab,
3195                 "failed to send wmi command to add twt dialog: %d",
3196                 ret);
3197         dev_kfree_skb(skb);
3198     }
3199     return ret;
3200 }
3201 
3202 int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
3203                        struct wmi_twt_del_dialog_params *params)
3204 {
3205     struct ath11k_pdev_wmi *wmi = ar->wmi;
3206     struct ath11k_base *ab = wmi->wmi_ab->ab;
3207     struct wmi_twt_del_dialog_params_cmd *cmd;
3208     struct sk_buff *skb;
3209     int ret, len;
3210 
3211     len = sizeof(*cmd);
3212 
3213     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3214     if (!skb)
3215         return -ENOMEM;
3216 
3217     cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
3218     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
3219               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3220 
3221     cmd->vdev_id = params->vdev_id;
3222     ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
3223     cmd->dialog_id = params->dialog_id;
3224 
3225     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3226            "wmi delete twt dialog vdev %u dialog id %u\n",
3227            cmd->vdev_id, cmd->dialog_id);
3228 
3229     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
3230     if (ret) {
3231         ath11k_warn(ab,
3232                 "failed to send wmi command to delete twt dialog: %d",
3233                 ret);
3234         dev_kfree_skb(skb);
3235     }
3236     return ret;
3237 }
3238 
3239 int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
3240                      struct wmi_twt_pause_dialog_params *params)
3241 {
3242     struct ath11k_pdev_wmi *wmi = ar->wmi;
3243     struct ath11k_base *ab = wmi->wmi_ab->ab;
3244     struct wmi_twt_pause_dialog_params_cmd *cmd;
3245     struct sk_buff *skb;
3246     int ret, len;
3247 
3248     len = sizeof(*cmd);
3249 
3250     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3251     if (!skb)
3252         return -ENOMEM;
3253 
3254     cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
3255     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3256                      WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
3257               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3258 
3259     cmd->vdev_id = params->vdev_id;
3260     ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
3261     cmd->dialog_id = params->dialog_id;
3262 
3263     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3264            "wmi pause twt dialog vdev %u dialog id %u\n",
3265            cmd->vdev_id, cmd->dialog_id);
3266 
3267     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
3268     if (ret) {
3269         ath11k_warn(ab,
3270                 "failed to send wmi command to pause twt dialog: %d",
3271                 ret);
3272         dev_kfree_skb(skb);
3273     }
3274     return ret;
3275 }
3276 
3277 int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
3278                       struct wmi_twt_resume_dialog_params *params)
3279 {
3280     struct ath11k_pdev_wmi *wmi = ar->wmi;
3281     struct ath11k_base *ab = wmi->wmi_ab->ab;
3282     struct wmi_twt_resume_dialog_params_cmd *cmd;
3283     struct sk_buff *skb;
3284     int ret, len;
3285 
3286     len = sizeof(*cmd);
3287 
3288     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3289     if (!skb)
3290         return -ENOMEM;
3291 
3292     cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
3293     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3294                      WMI_TAG_TWT_RESUME_DIALOG_CMD) |
3295               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3296 
3297     cmd->vdev_id = params->vdev_id;
3298     ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
3299     cmd->dialog_id = params->dialog_id;
3300     cmd->sp_offset_us = params->sp_offset_us;
3301     cmd->next_twt_size = params->next_twt_size;
3302 
3303     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3304            "wmi resume twt dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
3305            cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
3306            cmd->next_twt_size);
3307 
3308     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
3309     if (ret) {
3310         ath11k_warn(ab,
3311                 "failed to send wmi command to resume twt dialog: %d",
3312                 ret);
3313         dev_kfree_skb(skb);
3314     }
3315     return ret;
3316 }
3317 
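     /* Program OBSS PD spatial reuse for a vdev from the mac80211
      * ieee80211_he_obss_pd settings: the enable flag plus the min/max
      * OBSS PD offsets supplied by mac80211.
      */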
3318 int
3319 ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
3320                  struct ieee80211_he_obss_pd *he_obss_pd)
3321 {
3322     struct ath11k_pdev_wmi *wmi = ar->wmi;
3323     struct ath11k_base *ab = wmi->wmi_ab->ab;
3324     struct wmi_obss_spatial_reuse_params_cmd *cmd;
3325     struct sk_buff *skb;
3326     int ret, len;
3327 
3328     len = sizeof(*cmd);
3329 
3330     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3331     if (!skb)
3332         return -ENOMEM;
3333 
3334     cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3335     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3336                      WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
3337               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3338     cmd->vdev_id = vdev_id;
3339     cmd->enable = he_obss_pd->enable;
3340     cmd->obss_min = he_obss_pd->min_offset;
3341     cmd->obss_max = he_obss_pd->max_offset;
3342 
3343     ret = ath11k_wmi_cmd_send(wmi, skb,
3344                   WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3345     if (ret) {
3346         ath11k_warn(ab,
3347                 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
3348         dev_kfree_skb(skb);
3349     }
3350     return ret;
3351 }
3352 
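     /* The next six helpers all send the same wmi_pdev_obss_pd_bitmap_cmd
      * payload (pdev_id plus a bitmap array) and differ only in the TLV
      * tag and command ID: SRG BSS color / partial BSSID bitmaps, and the
      * SRG and non-SRG OBSS color / BSSID enable bitmaps used for
      * spatial-reuse filtering.
      */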
3353 int
3354 ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
3355 {
3356     struct ath11k_pdev_wmi *wmi = ar->wmi;
3357     struct ath11k_base *ab = wmi->wmi_ab->ab;
3358     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3359     struct sk_buff *skb;
3360     int ret, len;
3361 
3362     len = sizeof(*cmd);
3363 
3364     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3365     if (!skb)
3366         return -ENOMEM;
3367 
3368     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3369     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3370                      WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
3371               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3372     cmd->pdev_id = ar->pdev->pdev_id;
3373     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3374 
3375     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3376            "obss pd pdev_id %d bss color bitmap %08x %08x\n",
3377            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3378 
3379     ret = ath11k_wmi_cmd_send(wmi, skb,
3380                   WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
3381     if (ret) {
3382         ath11k_warn(ab,
3383                 "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
3384         dev_kfree_skb(skb);
3385     }
3386 
3387     return ret;
3388 }
3389 
3390 int
3391 ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
3392 {
3393     struct ath11k_pdev_wmi *wmi = ar->wmi;
3394     struct ath11k_base *ab = wmi->wmi_ab->ab;
3395     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3396     struct sk_buff *skb;
3397     int ret, len;
3398 
3399     len = sizeof(*cmd);
3400 
3401     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3402     if (!skb)
3403         return -ENOMEM;
3404 
3405     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3406     cmd->tlv_header =
3407         FIELD_PREP(WMI_TLV_TAG,
3408                WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
3409         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3410     cmd->pdev_id = ar->pdev->pdev_id;
3411     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3412 
3413     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3414            "obss pd pdev_id %d partial bssid bitmap %08x %08x\n",
3415            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3416 
3417     ret = ath11k_wmi_cmd_send(wmi, skb,
3418                   WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
3419     if (ret) {
3420         ath11k_warn(ab,
3421                 "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
3422         dev_kfree_skb(skb);
3423     }
3424 
3425     return ret;
3426 }
3427 
3428 int
3429 ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
3430 {
3431     struct ath11k_pdev_wmi *wmi = ar->wmi;
3432     struct ath11k_base *ab = wmi->wmi_ab->ab;
3433     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3434     struct sk_buff *skb;
3435     int ret, len;
3436 
3437     len = sizeof(*cmd);
3438 
3439     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3440     if (!skb)
3441         return -ENOMEM;
3442 
3443     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3444     cmd->tlv_header =
3445         FIELD_PREP(WMI_TLV_TAG,
3446                WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
3447         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3448     cmd->pdev_id = ar->pdev->pdev_id;
3449     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3450 
3451     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3452            "obss pd srg pdev_id %d bss color enable bitmap %08x %08x\n",
3453            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3454 
3455     ret = ath11k_wmi_cmd_send(wmi, skb,
3456                   WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
3457     if (ret) {
3458         ath11k_warn(ab,
3459                 "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
3460         dev_kfree_skb(skb);
3461     }
3462 
3463     return ret;
3464 }
3465 
3466 int
3467 ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
3468 {
3469     struct ath11k_pdev_wmi *wmi = ar->wmi;
3470     struct ath11k_base *ab = wmi->wmi_ab->ab;
3471     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3472     struct sk_buff *skb;
3473     int ret, len;
3474 
3475     len = sizeof(*cmd);
3476 
3477     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3478     if (!skb)
3479         return -ENOMEM;
3480 
3481     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3482     cmd->tlv_header =
3483         FIELD_PREP(WMI_TLV_TAG,
3484                WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
3485         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3486     cmd->pdev_id = ar->pdev->pdev_id;
3487     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3488 
3489     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3490            "obss pd srg pdev_id %d bssid enable bitmap %08x %08x\n",
3491            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3492 
3493     ret = ath11k_wmi_cmd_send(wmi, skb,
3494                   WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
3495     if (ret) {
3496         ath11k_warn(ab,
3497                 "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
3498         dev_kfree_skb(skb);
3499     }
3500 
3501     return ret;
3502 }
3503 
3504 int
3505 ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
3506 {
3507     struct ath11k_pdev_wmi *wmi = ar->wmi;
3508     struct ath11k_base *ab = wmi->wmi_ab->ab;
3509     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3510     struct sk_buff *skb;
3511     int ret, len;
3512 
3513     len = sizeof(*cmd);
3514 
3515     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3516     if (!skb)
3517         return -ENOMEM;
3518 
3519     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3520     cmd->tlv_header =
3521         FIELD_PREP(WMI_TLV_TAG,
3522                WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
3523         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3524     cmd->pdev_id = ar->pdev->pdev_id;
3525     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3526 
3527     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3528            "obss pd non_srg pdev_id %d bss color enable bitmap %08x %08x\n",
3529            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3530 
3531     ret = ath11k_wmi_cmd_send(wmi, skb,
3532                   WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
3533     if (ret) {
3534         ath11k_warn(ab,
3535                 "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
3536         dev_kfree_skb(skb);
3537     }
3538 
3539     return ret;
3540 }
3541 
3542 int
3543 ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
3544 {
3545     struct ath11k_pdev_wmi *wmi = ar->wmi;
3546     struct ath11k_base *ab = wmi->wmi_ab->ab;
3547     struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
3548     struct sk_buff *skb;
3549     int ret, len;
3550 
3551     len = sizeof(*cmd);
3552 
3553     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3554     if (!skb)
3555         return -ENOMEM;
3556 
3557     cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
3558     cmd->tlv_header =
3559         FIELD_PREP(WMI_TLV_TAG,
3560                WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
3561         FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3562     cmd->pdev_id = ar->pdev->pdev_id;
3563     memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
3564 
3565     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3566            "obss pd non_srg pdev_id %d bssid enable bitmap %08x %08x\n",
3567            cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
3568 
3569     ret = ath11k_wmi_cmd_send(wmi, skb,
3570                   WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
3571     if (ret) {
3572         ath11k_warn(ab,
3573                 "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
3574         dev_kfree_skb(skb);
3575     }
3576 
3577     return ret;
3578 }
3579 
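     /* Configure BSS color collision detection on a vdev. When 'enable'
      * is set the event type is COLLISION_DETECTION, otherwise
      * DETECTION_DISABLE; the detection period comes from the caller and
      * the scan period uses the ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS
      * default. Illustrative call with hypothetical values:
      *
      *   ath11k_wmi_send_obss_color_collision_cfg_cmd(ar, vdev_id,
      *                                                bss_color, 5000,
      *                                                true);
      */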
3580 int
3581 ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
3582                          u8 bss_color, u32 period,
3583                          bool enable)
3584 {
3585     struct ath11k_pdev_wmi *wmi = ar->wmi;
3586     struct ath11k_base *ab = wmi->wmi_ab->ab;
3587     struct wmi_obss_color_collision_cfg_params_cmd *cmd;
3588     struct sk_buff *skb;
3589     int ret, len;
3590 
3591     len = sizeof(*cmd);
3592 
3593     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3594     if (!skb)
3595         return -ENOMEM;
3596 
3597     cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
3598     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3599                      WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
3600               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3601     cmd->vdev_id = vdev_id;
3602     cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
3603                  ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
3604     cmd->current_bss_color = bss_color;
3605     cmd->detection_period_ms = period;
3606     cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
3607     cmd->free_slot_expiry_time_ms = 0;
3608     cmd->flags = 0;
3609 
3610     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3611            "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
3612            cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
3613            cmd->detection_period_ms, cmd->scan_period_ms);
3614 
3615     ret = ath11k_wmi_cmd_send(wmi, skb,
3616                   WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
3617     if (ret) {
3618         ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
3619         dev_kfree_skb(skb);
3620     }
3621     return ret;
3622 }
3623 
3624 int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
3625                         bool enable)
3626 {
3627     struct ath11k_pdev_wmi *wmi = ar->wmi;
3628     struct ath11k_base *ab = wmi->wmi_ab->ab;
3629     struct wmi_bss_color_change_enable_params_cmd *cmd;
3630     struct sk_buff *skb;
3631     int ret, len;
3632 
3633     len = sizeof(*cmd);
3634 
3635     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3636     if (!skb)
3637         return -ENOMEM;
3638 
3639     cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
3640     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
3641               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3642     cmd->vdev_id = vdev_id;
3643     cmd->enable = enable ? 1 : 0;
3644 
3645     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3646            "wmi_send_bss_color_change_enable id %d enable %d\n",
3647            cmd->vdev_id, cmd->enable);
3648 
3649     ret = ath11k_wmi_cmd_send(wmi, skb,
3650                   WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
3651     if (ret) {
3652         ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
3653         dev_kfree_skb(skb);
3654     }
3655     return ret;
3656 }
3657 
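     /* Install the FILS discovery frame template for a vdev. The message
      * is a fixed wmi_fils_discovery_tmpl_cmd followed by a byte-array
      * TLV carrying the template; the TLV length is rounded up to a
      * 4-byte boundary while buf_len keeps the real frame length.
      */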
3658 int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
3659                    struct sk_buff *tmpl)
3660 {
3661     struct wmi_tlv *tlv;
3662     struct sk_buff *skb;
3663     void *ptr;
3664     int ret, len;
3665     size_t aligned_len;
3666     struct wmi_fils_discovery_tmpl_cmd *cmd;
3667 
3668     aligned_len = roundup(tmpl->len, 4);
3669     len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
3670 
3671     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3672            "WMI vdev %i set FILS discovery template\n", vdev_id);
3673 
3674     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3675     if (!skb)
3676         return -ENOMEM;
3677 
3678     cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
3679     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3680                      WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
3681               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
3682     cmd->vdev_id = vdev_id;
3683     cmd->buf_len = tmpl->len;
3684     ptr = skb->data + sizeof(*cmd);
3685 
3686     tlv = ptr;
3687     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
3688               FIELD_PREP(WMI_TLV_LEN, aligned_len);
3689     memcpy(tlv->value, tmpl->data, tmpl->len);
3690 
3691     ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
3692     if (ret) {
3693         ath11k_warn(ar->ab,
3694                 "WMI vdev %i failed to send FILS discovery template command\n",
3695                 vdev_id);
3696         dev_kfree_skb(skb);
3697     }
3698     return ret;
3699 }
3700 
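     /* Install the probe response template for a vdev. The layout mirrors
      * the FILS discovery template above, with an additional (zeroed)
      * wmi_bcn_prb_info TLV between the fixed command and the byte-array
      * TLV that carries the frame.
      */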
3701 int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
3702                    struct sk_buff *tmpl)
3703 {
3704     struct wmi_probe_tmpl_cmd *cmd;
3705     struct wmi_bcn_prb_info *probe_info;
3706     struct wmi_tlv *tlv;
3707     struct sk_buff *skb;
3708     void *ptr;
3709     int ret, len;
3710     size_t aligned_len = roundup(tmpl->len, 4);
3711 
3712     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3713            "WMI vdev %i set probe response template\n", vdev_id);
3714 
3715     len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3716 
3717     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3718     if (!skb)
3719         return -ENOMEM;
3720 
3721     cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3722     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
3723               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
3724     cmd->vdev_id = vdev_id;
3725     cmd->buf_len = tmpl->len;
3726 
3727     ptr = skb->data + sizeof(*cmd);
3728 
3729     probe_info = ptr;
3730     len = sizeof(*probe_info);
3731     probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
3732                         WMI_TAG_BCN_PRB_INFO) |
3733                  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3734     probe_info->caps = 0;
3735     probe_info->erp = 0;
3736 
3737     ptr += sizeof(*probe_info);
3738 
3739     tlv = ptr;
3740     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
3741               FIELD_PREP(WMI_TLV_LEN, aligned_len);
3742     memcpy(tlv->value, tmpl->data, tmpl->len);
3743 
3744     ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3745     if (ret) {
3746         ath11k_warn(ar->ab,
3747                 "WMI vdev %i failed to send probe response template command\n",
3748                 vdev_id);
3749         dev_kfree_skb(skb);
3750     }
3751     return ret;
3752 }
3753 
3754 int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
3755                   bool unsol_bcast_probe_resp_enabled)
3756 {
3757     struct sk_buff *skb;
3758     int ret, len;
3759     struct wmi_fils_discovery_cmd *cmd;
3760 
3761     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
3762            "WMI vdev %i set %s interval to %u TU\n",
3763            vdev_id, unsol_bcast_probe_resp_enabled ?
3764            "unsolicited broadcast probe response" : "FILS discovery",
3765            interval);
3766 
3767     len = sizeof(*cmd);
3768     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3769     if (!skb)
3770         return -ENOMEM;
3771 
3772     cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3773     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
3774               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
3775     cmd->vdev_id = vdev_id;
3776     cmd->interval = interval;
3777     cmd->config = unsol_bcast_probe_resp_enabled;
3778 
3779     ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3780     if (ret) {
3781         ath11k_warn(ar->ab,
3782                 "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3783                 vdev_id);
3784         dev_kfree_skb(skb);
3785     }
3786     return ret;
3787 }
3788 
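     /* Handle the OBSS color collision event from firmware: look up the
      * vif by vdev_id and, for a COLLISION_DETECTION event, forward the
      * reported color bitmap to mac80211. The disable and free-slot event
      * types are intentionally ignored.
      */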
3789 static void
3790 ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
3791 {
3792     const void **tb;
3793     const struct wmi_obss_color_collision_event *ev;
3794     struct ath11k_vif *arvif;
3795     int ret;
3796 
3797     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
3798     if (IS_ERR(tb)) {
3799         ret = PTR_ERR(tb);
3800         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
3801         return;
3802     }
3803 
3804     rcu_read_lock();
3805 
3806     ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
3807     if (!ev) {
3808         ath11k_warn(ab, "failed to fetch obss color collision ev");
3809         goto exit;
3810     }
3811 
3812     arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
3813     if (!arvif) {
3814         ath11k_warn(ab, "failed to find arvif with vdev id %d in obss_color_collision_event\n",
3815                 ev->vdev_id);
3816         goto exit;
3817     }
3818 
3819     switch (ev->evt_type) {
3820     case WMI_BSS_COLOR_COLLISION_DETECTION:
3821         ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
3822                                GFP_KERNEL);
3823         ath11k_dbg(ab, ATH11K_DBG_WMI,
3824                "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
3825                ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
3826         break;
3827     case WMI_BSS_COLOR_COLLISION_DISABLE:
3828     case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
3829     case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
3830         break;
3831     default:
3832         ath11k_warn(ab, "received unknown obss color collision detection event\n");
3833     }
3834 
3835 exit:
3836     kfree(tb);
3837     rcu_read_unlock();
3838 }
3839 
3840 static void
3841 ath11k_fill_band_to_mac_param(struct ath11k_base  *soc,
3842                   struct wmi_host_pdev_band_to_mac *band_to_mac)
3843 {
3844     u8 i;
3845     struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
3846     struct ath11k_pdev *pdev;
3847 
3848     for (i = 0; i < soc->num_radios; i++) {
3849         pdev = &soc->pdevs[i];
3850         hal_reg_cap = &soc->hal_reg_cap[i];
3851         band_to_mac[i].pdev_id = pdev->pdev_id;
3852 
3853         switch (pdev->cap.supported_bands) {
3854         case WMI_HOST_WLAN_2G_5G_CAP:
3855             band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
3856             band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
3857             break;
3858         case WMI_HOST_WLAN_2G_CAP:
3859             band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
3860             band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
3861             break;
3862         case WMI_HOST_WLAN_5G_CAP:
3863             band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
3864             band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
3865             break;
3866         default:
3867             break;
3868         }
3869     }
3870 }
3871 
3872 static void
3873 ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
3874                 struct target_resource_config *tg_cfg)
3875 {
3876     wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
3877     wmi_cfg->num_peers = tg_cfg->num_peers;
3878     wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
3879     wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
3880     wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
3881     wmi_cfg->num_tids = tg_cfg->num_tids;
3882     wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
3883     wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
3884     wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
3885     wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
3886     wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
3887     wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
3888     wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
3889     wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
3890     wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
3891     wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
3892     wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
3893     wmi_cfg->roam_offload_max_ap_profiles =
3894         tg_cfg->roam_offload_max_ap_profiles;
3895     wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
3896     wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
3897     wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
3898     wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
3899     wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
3900     wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
3901     wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
3902     wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3903         tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
3904     wmi_cfg->vow_config = tg_cfg->vow_config;
3905     wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
3906     wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
3907     wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
3908     wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
3909     wmi_cfg->num_tdls_conn_table_entries =
3910         tg_cfg->num_tdls_conn_table_entries;
3911     wmi_cfg->beacon_tx_offload_max_vdev =
3912         tg_cfg->beacon_tx_offload_max_vdev;
3913     wmi_cfg->num_multicast_filter_entries =
3914         tg_cfg->num_multicast_filter_entries;
3915     wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
3916     wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
3917     wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
3918     wmi_cfg->max_tdls_concurrent_sleep_sta =
3919         tg_cfg->max_tdls_concurrent_sleep_sta;
3920     wmi_cfg->max_tdls_concurrent_buffer_sta =
3921         tg_cfg->max_tdls_concurrent_buffer_sta;
3922     wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
3923     wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
3924     wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
3925     wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
3926     wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
3927     wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
3928     wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
3929     wmi_cfg->flag1 = tg_cfg->flag1;
3930     wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
3931     wmi_cfg->sched_params = tg_cfg->sched_params;
3932     wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
3933     wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
3934 }
3935 
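     /* Build and send WMI_INIT. The buffer is laid out as:
      *   wmi_init_cmd | wmi_resource_config | array-of-struct TLV with
      *   the host memory chunks | optionally wmi_pdev_set_hw_mode_cmd
      *   plus an array-of-struct TLV of band-to-mac mappings when a
      *   preferred hw mode was selected.
      * The skb is sized for WMI_MAX_MEM_REQS chunks but only
      * num_mem_chunks entries are actually populated.
      */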
3936 static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
3937                 struct wmi_init_cmd_param *param)
3938 {
3939     struct ath11k_base *ab = wmi->wmi_ab->ab;
3940     struct sk_buff *skb;
3941     struct wmi_init_cmd *cmd;
3942     struct wmi_resource_config *cfg;
3943     struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
3944     struct wmi_pdev_band_to_mac *band_to_mac;
3945     struct wlan_host_mem_chunk *host_mem_chunks;
3946     struct wmi_tlv *tlv;
3947     size_t ret, len;
3948     void *ptr;
3949     u32 hw_mode_len = 0;
3950     u16 idx;
3951 
3952     if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3953         hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3954                   (param->num_band_to_mac * sizeof(*band_to_mac));
3955 
3956     len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3957           (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3958 
3959     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
3960     if (!skb)
3961         return -ENOMEM;
3962 
3963     cmd = (struct wmi_init_cmd *)skb->data;
3964 
3965     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
3966               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
3967 
3968     ptr = skb->data + sizeof(*cmd);
3969     cfg = ptr;
3970 
3971     ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
3972 
3973     cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
3974               FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
3975 
3976     ptr += sizeof(*cfg);
3977     host_mem_chunks = ptr + TLV_HDR_SIZE;
3978     len = sizeof(struct wlan_host_mem_chunk);
3979 
3980     for (idx = 0; idx < param->num_mem_chunks; ++idx) {
3981         host_mem_chunks[idx].tlv_header =
3982                 FIELD_PREP(WMI_TLV_TAG,
3983                        WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
3984                 FIELD_PREP(WMI_TLV_LEN, len);
3985 
3986         host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
3987         host_mem_chunks[idx].size = param->mem_chunks[idx].len;
3988         host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
3989 
3990         ath11k_dbg(ab, ATH11K_DBG_WMI,
3991                "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3992                param->mem_chunks[idx].req_id,
3993                (u64)param->mem_chunks[idx].paddr,
3994                param->mem_chunks[idx].len);
3995     }
3996     cmd->num_host_mem_chunks = param->num_mem_chunks;
3997     len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
3998 
3999     /* array-of-struct TLV header for the host mem chunks (len is 0 when num_mem_chunks is zero) */
4000     tlv = ptr;
4001     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
4002               FIELD_PREP(WMI_TLV_LEN, len);
4003     ptr += TLV_HDR_SIZE + len;
4004 
4005     if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
4006         hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
4007         hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
4008                          WMI_TAG_PDEV_SET_HW_MODE_CMD) |
4009                       FIELD_PREP(WMI_TLV_LEN,
4010                          sizeof(*hw_mode) - TLV_HDR_SIZE);
4011 
4012         hw_mode->hw_mode_index = param->hw_mode_id;
4013         hw_mode->num_band_to_mac = param->num_band_to_mac;
4014 
4015         ptr += sizeof(*hw_mode);
4016 
4017         len = param->num_band_to_mac * sizeof(*band_to_mac);
4018         tlv = ptr;
4019         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
4020                   FIELD_PREP(WMI_TLV_LEN, len);
4021 
4022         ptr += TLV_HDR_SIZE;
4023         len = sizeof(*band_to_mac);
4024 
4025         for (idx = 0; idx < param->num_band_to_mac; idx++) {
4026             band_to_mac = (void *)ptr;
4027 
4028             band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
4029                                  WMI_TAG_PDEV_BAND_TO_MAC) |
4030                           FIELD_PREP(WMI_TLV_LEN,
4031                                  len - TLV_HDR_SIZE);
4032             band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
4033             band_to_mac->start_freq =
4034                 param->band_to_mac[idx].start_freq;
4035             band_to_mac->end_freq =
4036                 param->band_to_mac[idx].end_freq;
4037             ptr += sizeof(*band_to_mac);
4038         }
4039     }
4040 
4041     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
4042     if (ret) {
4043         ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
4044         dev_kfree_skb(skb);
4045     }
4046 
4047     return ret;
4048 }
4049 
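     /* Configure RX LRO for a pdev: the IPv4 and IPv6 hash seeds
      * (th_4/th_6, presumably Toeplitz seeds) are filled with fresh
      * random bytes every time this command is built.
      */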
4050 int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
4051                 int pdev_id)
4052 {
4053     struct ath11k_wmi_pdev_lro_config_cmd *cmd;
4054     struct sk_buff *skb;
4055     int ret;
4056 
4057     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4058     if (!skb)
4059         return -ENOMEM;
4060 
4061     cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
4062     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
4063               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
4064 
4065     get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
4066     get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
4067 
4068     cmd->pdev_id = pdev_id;
4069 
4070     ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4071     if (ret) {
4072         ath11k_warn(ar->ab,
4073                 "failed to send lro cfg req wmi cmd\n");
4074         goto err;
4075     }
4076 
4077     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
4078            "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4079     return 0;
4080 err:
4081     dev_kfree_skb(skb);
4082     return ret;
4083 }
4084 
4085 int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
4086 {
4087     unsigned long time_left;
4088 
4089     time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4090                         WMI_SERVICE_READY_TIMEOUT_HZ);
4091     if (!time_left)
4092         return -ETIMEDOUT;
4093 
4094     return 0;
4095 }
4096 
4097 int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
4098 {
4099     unsigned long time_left;
4100 
4101     time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4102                         WMI_SERVICE_READY_TIMEOUT_HZ);
4103     if (!time_left)
4104         return -ETIMEDOUT;
4105 
4106     return 0;
4107 }
4108 
4109 int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
4110                enum wmi_host_hw_mode_config_type mode)
4111 {
4112     struct wmi_pdev_set_hw_mode_cmd_param *cmd;
4113     struct sk_buff *skb;
4114     struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
4115     int len;
4116     int ret;
4117 
4118     len = sizeof(*cmd);
4119 
4120     skb = ath11k_wmi_alloc_skb(wmi_ab, len);
4121     if (!skb)
4122         return -ENOMEM;
4123 
4124     cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
4125 
4126     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
4127               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
4128 
4129     cmd->pdev_id = WMI_PDEV_ID_SOC;
4130     cmd->hw_mode_index = mode;
4131 
4132     ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4133     if (ret) {
4134         ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4135         dev_kfree_skb(skb);
4136     }
4137 
4138     return ret;
4139 }
4140 
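     /* Top-level WMI_INIT path: take the hw_params-provided resource
      * config, the previously recorded host memory chunks and the
      * preferred hw mode (forced to WMI_HOST_HW_MODE_MAX, i.e. "none",
      * for single-pdev chips), then hand everything to
      * ath11k_init_cmd_send() on the first WMI pdev instance.
      */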
4141 int ath11k_wmi_cmd_init(struct ath11k_base *ab)
4142 {
4143     struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
4144     struct wmi_init_cmd_param init_param;
4145     struct target_resource_config  config;
4146 
4147     memset(&init_param, 0, sizeof(init_param));
4148     memset(&config, 0, sizeof(config));
4149 
4150     ab->hw_params.hw_ops->wmi_init_config(ab, &config);
4151 
4152     memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
4153 
4154     init_param.res_cfg = &wmi_sc->wlan_resource_config;
4155     init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
4156     init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
4157     init_param.mem_chunks = wmi_sc->mem_chunks;
4158 
4159     if (ab->hw_params.single_pdev_only)
4160         init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
4161 
4162     init_param.num_band_to_mac = ab->num_radios;
4163     ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
4164 
4165     return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
4166 }
4167 
4168 int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
4169                   struct ath11k_wmi_vdev_spectral_conf_param *param)
4170 {
4171     struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
4172     struct sk_buff *skb;
4173     int ret;
4174 
4175     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4176     if (!skb)
4177         return -ENOMEM;
4178 
4179     cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
4180     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
4181                      WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
4182               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
4183 
4184     memcpy(&cmd->param, param, sizeof(*param));
4185 
4186     ret = ath11k_wmi_cmd_send(ar->wmi, skb,
4187                   WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
4188     if (ret) {
4189         ath11k_warn(ar->ab,
4190                 "failed to send spectral scan config wmi cmd\n");
4191         goto err;
4192     }
4193 
4194     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
4195            "WMI spectral scan config cmd vdev_id 0x%x\n",
4196            param->vdev_id);
4197 
4198     return 0;
4199 err:
4200     dev_kfree_skb(skb);
4201     return ret;
4202 }
4203 
4204 int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
4205                     u32 trigger, u32 enable)
4206 {
4207     struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
4208     struct sk_buff *skb;
4209     int ret;
4210 
4211     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4212     if (!skb)
4213         return -ENOMEM;
4214 
4215     cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
4216     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
4217                      WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
4218               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
4219 
4220     cmd->vdev_id = vdev_id;
4221     cmd->trigger_cmd = trigger;
4222     cmd->enable_cmd = enable;
4223 
4224     ret = ath11k_wmi_cmd_send(ar->wmi, skb,
4225                   WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
4226     if (ret) {
4227         ath11k_warn(ar->ab,
4228                 "failed to send spectral enable wmi cmd\n");
4229         goto err;
4230     }
4231 
4232     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
4233            "WMI spectral enable cmd vdev id 0x%x\n",
4234            vdev_id);
4235 
4236     return 0;
4237 err:
4238     dev_kfree_skb(skb);
4239     return ret;
4240 }
4241 
4242 int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
4243                  struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
4244 {
4245     struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
4246     struct sk_buff *skb;
4247     int ret;
4248 
4249     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4250     if (!skb)
4251         return -ENOMEM;
4252 
4253     cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
4254     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
4255               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
4256 
4257     cmd->pdev_id        = param->pdev_id;
4258     cmd->module_id      = param->module_id;
4259     cmd->base_paddr_lo  = param->base_paddr_lo;
4260     cmd->base_paddr_hi  = param->base_paddr_hi;
4261     cmd->head_idx_paddr_lo  = param->head_idx_paddr_lo;
4262     cmd->head_idx_paddr_hi  = param->head_idx_paddr_hi;
4263     cmd->tail_idx_paddr_lo  = param->tail_idx_paddr_lo;
4264     cmd->tail_idx_paddr_hi  = param->tail_idx_paddr_hi;
4265     cmd->num_elems      = param->num_elems;
4266     cmd->buf_size       = param->buf_size;
4267     cmd->num_resp_per_event = param->num_resp_per_event;
4268     cmd->event_timeout_ms   = param->event_timeout_ms;
4269 
4270     ret = ath11k_wmi_cmd_send(ar->wmi, skb,
4271                   WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4272     if (ret) {
4273         ath11k_warn(ar->ab,
4274                 "failed to send dma ring cfg req wmi cmd\n");
4275         goto err;
4276     }
4277 
4278     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
4279            "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
4280            param->pdev_id);
4281 
4282     return 0;
4283 err:
4284     dev_kfree_skb(skb);
4285     return ret;
4286 }
4287 
4288 static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
4289                           u16 tag, u16 len,
4290                           const void *ptr, void *data)
4291 {
4292     struct wmi_tlv_dma_buf_release_parse *parse = data;
4293 
4294     if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4295         return -EPROTO;
4296 
4297     if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
4298         return -ENOBUFS;
4299 
4300     parse->num_buf_entry++;
4301     return 0;
4302 }
4303 
4304 static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
4305                          u16 tag, u16 len,
4306                          const void *ptr, void *data)
4307 {
4308     struct wmi_tlv_dma_buf_release_parse *parse = data;
4309 
4310     if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4311         return -EPROTO;
4312 
4313     if (parse->num_meta >= parse->fixed.num_meta_data_entry)
4314         return -ENOBUFS;
4315 
4316     parse->num_meta++;
4317     return 0;
4318 }
4319 
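     /* TLV walker for the DMA buffer release event. The first
      * WMI_TAG_ARRAY_STRUCT holds the buffer release entries and the
      * second holds the (spectral) meta data; the two bool flags ensure
      * the arrays are consumed in that order.
      */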
4320 static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
4321                     u16 tag, u16 len,
4322                     const void *ptr, void *data)
4323 {
4324     struct wmi_tlv_dma_buf_release_parse *parse = data;
4325     int ret;
4326 
4327     switch (tag) {
4328     case WMI_TAG_DMA_BUF_RELEASE:
4329         memcpy(&parse->fixed, ptr,
4330                sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
4331         parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
4332         break;
4333     case WMI_TAG_ARRAY_STRUCT:
4334         if (!parse->buf_entry_done) {
4335             parse->num_buf_entry = 0;
4336             parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
4337 
4338             ret = ath11k_wmi_tlv_iter(ab, ptr, len,
4339                           ath11k_wmi_tlv_dma_buf_entry_parse,
4340                           parse);
4341             if (ret) {
4342                 ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4343                         ret);
4344                 return ret;
4345             }
4346 
4347             parse->buf_entry_done = true;
4348         } else if (!parse->meta_data_done) {
4349             parse->num_meta = 0;
4350             parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
4351 
4352             ret = ath11k_wmi_tlv_iter(ab, ptr, len,
4353                           ath11k_wmi_tlv_dma_buf_meta_parse,
4354                           parse);
4355             if (ret) {
4356                 ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4357                         ret);
4358                 return ret;
4359             }
4360 
4361             parse->meta_data_done = true;
4362         }
4363         break;
4364     default:
4365         break;
4366     }
4367     return 0;
4368 }
4369 
4370 static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
4371                                struct sk_buff *skb)
4372 {
4373     struct wmi_tlv_dma_buf_release_parse parse = { };
4374     struct ath11k_dbring_buf_release_event param;
4375     int ret;
4376 
4377     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
4378                   ath11k_wmi_tlv_dma_buf_parse,
4379                   &parse);
4380     if (ret) {
4381         ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4382         return;
4383     }
4384 
4385     param.fixed     = parse.fixed;
4386     param.buf_entry     = parse.buf_entry;
4387     param.num_buf_entry = parse.num_buf_entry;
4388     param.meta_data     = parse.meta_data;
4389     param.num_meta      = parse.num_meta;
4390 
4391     ret = ath11k_dbring_buffer_release_event(ab, &param);
4392     if (ret) {
4393         ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4394         return;
4395     }
4396 }
4397 
4398 static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
4399                          u16 tag, u16 len,
4400                          const void *ptr, void *data)
4401 {
4402     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4403     struct wmi_hw_mode_capabilities *hw_mode_cap;
4404     u32 phy_map = 0;
4405 
4406     if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4407         return -EPROTO;
4408 
4409     if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
4410         return -ENOBUFS;
4411 
4412     hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
4413                    hw_mode_id);
4414     svc_rdy_ext->n_hw_mode_caps++;
4415 
4416     phy_map = hw_mode_cap->phy_id_map;
4417     while (phy_map) {
4418         svc_rdy_ext->tot_phy_id++;
4419         phy_map = phy_map >> 1;
4420     }
4421 
4422     return 0;
4423 }
4424 
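     /* Walk the hw mode capability TLVs and pick the preferred hw mode:
      * each advertised mode is compared against the current preference
      * via ath11k_hw_mode_pri_map[] (lower value means higher priority)
      * and the winning mode's capabilities are cached for later per-phy
      * parsing.
      */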
4425 static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
4426                        u16 len, const void *ptr, void *data)
4427 {
4428     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4429     struct wmi_hw_mode_capabilities *hw_mode_caps;
4430     enum wmi_host_hw_mode_config_type mode, pref;
4431     u32 i;
4432     int ret;
4433 
4434     svc_rdy_ext->n_hw_mode_caps = 0;
4435     svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
4436 
4437     ret = ath11k_wmi_tlv_iter(soc, ptr, len,
4438                   ath11k_wmi_tlv_hw_mode_caps_parse,
4439                   svc_rdy_ext);
4440     if (ret) {
4441         ath11k_warn(soc, "failed to parse tlv %d\n", ret);
4442         return ret;
4443     }
4444 
4445     i = 0;
4446     while (i < svc_rdy_ext->n_hw_mode_caps) {
4447         hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
4448         mode = hw_mode_caps->hw_mode_id;
4449         pref = soc->wmi_ab.preferred_hw_mode;
4450 
4451         if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
4452             svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
4453             soc->wmi_ab.preferred_hw_mode = mode;
4454         }
4455         i++;
4456     }
4457 
4458     ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
4459            soc->wmi_ab.preferred_hw_mode);
4460     if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
4461         return -EINVAL;
4462 
4463     return 0;
4464 }
4465 
4466 static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
4467                          u16 tag, u16 len,
4468                          const void *ptr, void *data)
4469 {
4470     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4471 
4472     if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
4473         return -EPROTO;
4474 
4475     if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
4476         return -ENOBUFS;
4477 
4478     len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
4479     if (!svc_rdy_ext->n_mac_phy_caps) {
4480         svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
4481                             len, GFP_ATOMIC);
4482         if (!svc_rdy_ext->mac_phy_caps)
4483             return -ENOMEM;
4484     }
4485 
4486     memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
4487     svc_rdy_ext->n_mac_phy_caps++;
4488     return 0;
4489 }
4490 
4491 static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
4492                          u16 tag, u16 len,
4493                          const void *ptr, void *data)
4494 {
4495     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4496 
4497     if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
4498         return -EPROTO;
4499 
4500     if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
4501         return -ENOBUFS;
4502 
4503     svc_rdy_ext->n_ext_hal_reg_caps++;
4504     return 0;
4505 }
4506 
4507 static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
4508                        u16 len, const void *ptr, void *data)
4509 {
4510     struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
4511     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4512     struct ath11k_hal_reg_capabilities_ext reg_cap;
4513     int ret;
4514     u32 i;
4515 
4516     svc_rdy_ext->n_ext_hal_reg_caps = 0;
4517     svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
4518     ret = ath11k_wmi_tlv_iter(soc, ptr, len,
4519                   ath11k_wmi_tlv_ext_hal_reg_caps_parse,
4520                   svc_rdy_ext);
4521     if (ret) {
4522         ath11k_warn(soc, "failed to parse tlv %d\n", ret);
4523         return ret;
4524     }
4525 
4526     for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
4527         ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4528                               svc_rdy_ext->soc_hal_reg_caps,
4529                               svc_rdy_ext->ext_hal_reg_caps, i,
4530                               &reg_cap);
4531         if (ret) {
4532             ath11k_warn(soc, "failed to extract reg cap %d\n", i);
4533             return ret;
4534         }
4535 
4536         memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
4537                &reg_cap, sizeof(reg_cap));
4538     }
4539     return 0;
4540 }
4541 
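     /* Parse the SOC HAL register capabilities and, using the preferred
      * hw mode's phy_id_map, pull the per-phy MAC/PHY capabilities into
      * soc->pdevs[]. Chips flagged single_pdev_only (e.g. QCA6390)
      * collapse everything into pdev 0 and report a single radio.
      */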
4542 static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
4543                              u16 len, const void *ptr,
4544                              void *data)
4545 {
4546     struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
4547     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4548     u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
4549     u32 phy_id_map;
4550     int pdev_index = 0;
4551     int ret;
4552 
4553     svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
4554     svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
4555 
4556     soc->num_radios = 0;
4557     soc->target_pdev_count = 0;
4558     phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
4559 
4560     while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4561         ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4562                                 svc_rdy_ext->hw_caps,
4563                                 svc_rdy_ext->hw_mode_caps,
4564                                 svc_rdy_ext->soc_hal_reg_caps,
4565                                 svc_rdy_ext->mac_phy_caps,
4566                                 hw_mode_id, soc->num_radios,
4567                                 &soc->pdevs[pdev_index]);
4568         if (ret) {
4569             ath11k_warn(soc, "failed to extract mac caps, idx: %d\n",
4570                     soc->num_radios);
4571             return ret;
4572         }
4573 
4574         soc->num_radios++;
4575 
4576         /* For QCA6390, save mac_phy capability in the same pdev */
4577         if (soc->hw_params.single_pdev_only)
4578             pdev_index = 0;
4579         else
4580             pdev_index = soc->num_radios;
4581 
4582         /* TODO: mac_phy_cap prints */
4583         phy_id_map >>= 1;
4584     }
4585 
4586     /* For QCA6390, set num_radios to 1 because the host manages
4587      * both the 2G and 5G radios in a single pdev.
4588      * Set pdev_id to 0; a pdev_id of 0 means SoC level.
4589      */
4590     if (soc->hw_params.single_pdev_only) {
4591         soc->num_radios = 1;
4592         soc->pdevs[0].pdev_id = 0;
4593     }
4594 
4595     return 0;
4596 }
4597 
4598 static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
4599                           u16 tag, u16 len,
4600                           const void *ptr, void *data)
4601 {
4602     struct wmi_tlv_dma_ring_caps_parse *parse = data;
4603 
4604     if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4605         return -EPROTO;
4606 
4607     parse->n_dma_ring_caps++;
4608     return 0;
4609 }
4610 
4611 static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
4612                     u32 num_cap)
4613 {
4614     size_t sz;
4615     void *ptr;
4616 
4617     sz = num_cap * sizeof(struct ath11k_dbring_cap);
4618     ptr = kzalloc(sz, GFP_ATOMIC);
4619     if (!ptr)
4620         return -ENOMEM;
4621 
4622     ab->db_caps = ptr;
4623     ab->num_db_cap = num_cap;
4624 
4625     return 0;
4626 }
4627 
4628 static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
4629 {
4630     kfree(ab->db_caps);
4631     ab->db_caps = NULL;
4632 }
4633 
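/*
 * Converts the DMA ring (direct buffer) capability array: the TLVs are
 * counted first, ab->db_caps is sized accordingly, and each entry is
 * validated (module_id must be below WMI_DIRECT_BUF_MAX) before its pdev id
 * is translated with DP_HW2SW_MACID.  A non-zero ab->num_db_cap means the
 * caps were already populated by an earlier event, so the array is ignored.
 */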
4634 static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
4635                     u16 len, const void *ptr, void *data)
4636 {
4637     struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
4638     struct wmi_dma_ring_capabilities *dma_caps;
4639     struct ath11k_dbring_cap *dir_buff_caps;
4640     int ret;
4641     u32 i;
4642 
4643     dma_caps_parse->n_dma_ring_caps = 0;
4644     dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
4645     ret = ath11k_wmi_tlv_iter(ab, ptr, len,
4646                   ath11k_wmi_tlv_dma_ring_caps_parse,
4647                   dma_caps_parse);
4648     if (ret) {
4649         ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
4650         return ret;
4651     }
4652 
4653     if (!dma_caps_parse->n_dma_ring_caps)
4654         return 0;
4655 
4656     if (ab->num_db_cap) {
4657         ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
4658         return 0;
4659     }
4660 
4661     ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
4662     if (ret)
4663         return ret;
4664 
4665     dir_buff_caps = ab->db_caps;
4666     for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
4667         if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
4668             ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
4669             ret = -EINVAL;
4670             goto free_dir_buff;
4671         }
4672 
4673         dir_buff_caps[i].id = dma_caps[i].module_id;
4674         dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
4675         dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
4676         dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
4677         dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
4678     }
4679 
4680     return 0;
4681 
4682 free_dir_buff:
4683     ath11k_wmi_free_dbring_caps(ab);
4684     return ret;
4685 }
4686 
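/*
 * The service ready ext event carries several WMI_TAG_ARRAY_STRUCT TLVs
 * whose meaning depends purely on their position.  The *_done flags below
 * therefore consume the arrays in a fixed order: HW mode caps, MAC/PHY
 * caps, extended HAL reg caps, chainmask combos, chainmask caps, OEM DMA
 * ring caps and finally DMA ring caps.
 */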
4687 static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
4688                         u16 tag, u16 len,
4689                         const void *ptr, void *data)
4690 {
4691     struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
4692     struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
4693     int ret;
4694 
4695     switch (tag) {
4696     case WMI_TAG_SERVICE_READY_EXT_EVENT:
4697         ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
4698                         &svc_rdy_ext->param);
4699         if (ret) {
4700             ath11k_warn(ab, "unable to extract ext params\n");
4701             return ret;
4702         }
4703         break;
4704 
4705     case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
4706         svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
4707         svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
4708         break;
4709 
4710     case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
4711         ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
4712                                 svc_rdy_ext);
4713         if (ret)
4714             return ret;
4715         break;
4716 
4717     case WMI_TAG_ARRAY_STRUCT:
4718         if (!svc_rdy_ext->hw_mode_done) {
4719             ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
4720                               svc_rdy_ext);
4721             if (ret)
4722                 return ret;
4723 
4724             svc_rdy_ext->hw_mode_done = true;
4725         } else if (!svc_rdy_ext->mac_phy_done) {
4726             svc_rdy_ext->n_mac_phy_caps = 0;
4727             ret = ath11k_wmi_tlv_iter(ab, ptr, len,
4728                           ath11k_wmi_tlv_mac_phy_caps_parse,
4729                           svc_rdy_ext);
4730             if (ret) {
4731                 ath11k_warn(ab, "failed to parse tlv %d\n", ret);
4732                 return ret;
4733             }
4734 
4735             svc_rdy_ext->mac_phy_done = true;
4736         } else if (!svc_rdy_ext->ext_hal_reg_done) {
4737             ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
4738                                   svc_rdy_ext);
4739             if (ret)
4740                 return ret;
4741 
4742             svc_rdy_ext->ext_hal_reg_done = true;
4743         } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
4744             svc_rdy_ext->mac_phy_chainmask_combo_done = true;
4745         } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
4746             svc_rdy_ext->mac_phy_chainmask_cap_done = true;
4747         } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
4748             svc_rdy_ext->oem_dma_ring_cap_done = true;
4749         } else if (!svc_rdy_ext->dma_ring_cap_done) {
4750             ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
4751                                &svc_rdy_ext->dma_caps_parse);
4752             if (ret)
4753                 return ret;
4754 
4755             svc_rdy_ext->dma_ring_cap_done = true;
4756         }
4757         break;
4758 
4759     default:
4760         break;
4761     }
4762     return 0;
4763 }
4764 
4765 static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
4766                       struct sk_buff *skb)
4767 {
4768     struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
4769     int ret;
4770 
4771     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
4772                   ath11k_wmi_tlv_svc_rdy_ext_parse,
4773                   &svc_rdy_ext);
4774     if (ret) {
4775         ath11k_warn(ab, "failed to parse tlv %d\n", ret);
4776         goto err;
4777     }
4778 
4779     if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4780         complete(&ab->wmi_ab.service_ready);
4781 
4782     kfree(svc_rdy_ext.mac_phy_caps);
4783     return 0;
4784 
4785 err:
4786     ath11k_wmi_free_dbring_caps(ab);
4787     return ret;
4788 }
4789 
4790 static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
4791                          u16 tag, u16 len,
4792                          const void *ptr, void *data)
4793 {
4794     struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
4795     int ret;
4796 
4797     switch (tag) {
4798     case WMI_TAG_ARRAY_STRUCT:
4799         if (!parse->dma_ring_cap_done) {
4800             ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
4801                                &parse->dma_caps_parse);
4802             if (ret)
4803                 return ret;
4804 
4805             parse->dma_ring_cap_done = true;
4806         }
4807         break;
4808     default:
4809         break;
4810     }
4811 
4812     return 0;
4813 }
4814 
4815 static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
4816                        struct sk_buff *skb)
4817 {
4818     struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4819     int ret;
4820 
4821     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
4822                   ath11k_wmi_tlv_svc_rdy_ext2_parse,
4823                   &svc_rdy_ext2);
4824     if (ret) {
4825         ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4826         goto err;
4827     }
4828 
4829     complete(&ab->wmi_ab.service_ready);
4830 
4831     return 0;
4832 
4833 err:
4834     ath11k_wmi_free_dbring_caps(ab);
4835     return ret;
4836 }
4837 
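/*
 * Most of the event "pull" helpers below share one pattern, roughly:
 *
 *	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 *	ev = tb[WMI_TAG_xyz];		(NULL when the tag is absent)
 *	...copy the needed fields into the caller's structure...
 *	kfree(tb);
 *
 * WMI_TAG_xyz is only a placeholder for the event-specific tag;
 * ath11k_pull_vdev_start_resp_tlv() below is a representative example.
 */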
4838 static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
4839                        struct wmi_vdev_start_resp_event *vdev_rsp)
4840 {
4841     const void **tb;
4842     const struct wmi_vdev_start_resp_event *ev;
4843     int ret;
4844 
4845     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4846     if (IS_ERR(tb)) {
4847         ret = PTR_ERR(tb);
4848         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
4849         return ret;
4850     }
4851 
4852     ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4853     if (!ev) {
4854         ath11k_warn(ab, "failed to fetch vdev start resp ev");
4855         kfree(tb);
4856         return -EPROTO;
4857     }
4858 
4859     memset(vdev_rsp, 0, sizeof(*vdev_rsp));
4860 
4861     vdev_rsp->vdev_id = ev->vdev_id;
4862     vdev_rsp->requestor_id = ev->requestor_id;
4863     vdev_rsp->resp_type = ev->resp_type;
4864     vdev_rsp->status = ev->status;
4865     vdev_rsp->chain_mask = ev->chain_mask;
4866     vdev_rsp->smps_mode = ev->smps_mode;
4867     vdev_rsp->mac_id = ev->mac_id;
4868     vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
4869     vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
4870 
4871     kfree(tb);
4872     return 0;
4873 }
4874 
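/*
 * Each wmi_regulatory_rule_struct packs several values into 32-bit words,
 * so the conversion below unpacks them with FIELD_GET() and the REG_RULE_*
 * masks, e.g.:
 *
 *	start_freq = FIELD_GET(REG_RULE_START_FREQ, rule->freq_info);
 *	max_bw     = FIELD_GET(REG_RULE_MAX_BW, rule->bw_pwr_info);
 *
 * The result is a kcalloc'd array of struct cur_reg_rule owned by the
 * caller; NULL is returned if the allocation fails.
 */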
4875 static struct cur_reg_rule
4876 *create_reg_rules_from_wmi(u32 num_reg_rules,
4877                struct wmi_regulatory_rule_struct *wmi_reg_rule)
4878 {
4879     struct cur_reg_rule *reg_rule_ptr;
4880     u32 count;
4881 
4882     reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
4883                    GFP_ATOMIC);
4884 
4885     if (!reg_rule_ptr)
4886         return NULL;
4887 
4888     for (count = 0; count < num_reg_rules; count++) {
4889         reg_rule_ptr[count].start_freq =
4890             FIELD_GET(REG_RULE_START_FREQ,
4891                   wmi_reg_rule[count].freq_info);
4892         reg_rule_ptr[count].end_freq =
4893             FIELD_GET(REG_RULE_END_FREQ,
4894                   wmi_reg_rule[count].freq_info);
4895         reg_rule_ptr[count].max_bw =
4896             FIELD_GET(REG_RULE_MAX_BW,
4897                   wmi_reg_rule[count].bw_pwr_info);
4898         reg_rule_ptr[count].reg_power =
4899             FIELD_GET(REG_RULE_REG_PWR,
4900                   wmi_reg_rule[count].bw_pwr_info);
4901         reg_rule_ptr[count].ant_gain =
4902             FIELD_GET(REG_RULE_ANT_GAIN,
4903                   wmi_reg_rule[count].bw_pwr_info);
4904         reg_rule_ptr[count].flags =
4905             FIELD_GET(REG_RULE_FLAGS,
4906                   wmi_reg_rule[count].flag_info);
4907     }
4908 
4909     return reg_rule_ptr;
4910 }
4911 
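/*
 * The regulatory channel list event is a fixed wmi_reg_chan_list_cc_event
 * header followed by a single TLV array of wmi_regulatory_rule_struct
 * entries, 2 GHz rules first and 5 GHz rules immediately after.  The rule
 * pointer is therefore derived by stepping over the header plus the
 * array's own TLV header, and advanced by num_2g_reg_rules before the
 * 5 GHz rules are converted.
 */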
4912 static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
4913                            struct sk_buff *skb,
4914                            struct cur_regulatory_info *reg_info)
4915 {
4916     const void **tb;
4917     const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
4918     struct wmi_regulatory_rule_struct *wmi_reg_rule;
4919     u32 num_2g_reg_rules, num_5g_reg_rules;
4920     int ret;
4921 
4922     ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
4923 
4924     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4925     if (IS_ERR(tb)) {
4926         ret = PTR_ERR(tb);
4927         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
4928         return ret;
4929     }
4930 
4931     chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
4932     if (!chan_list_event_hdr) {
4933         ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
4934         kfree(tb);
4935         return -EPROTO;
4936     }
4937 
4938     reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
4939     reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;
4940 
4941     if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
4942         ath11k_warn(ab, "No regulatory rules available in the event info\n");
4943         kfree(tb);
4944         return -EINVAL;
4945     }
4946 
4947     memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
4948            REG_ALPHA2_LEN);
4949     reg_info->dfs_region = chan_list_event_hdr->dfs_region;
4950     reg_info->phybitmap = chan_list_event_hdr->phybitmap;
4951     reg_info->num_phy = chan_list_event_hdr->num_phy;
4952     reg_info->phy_id = chan_list_event_hdr->phy_id;
4953     reg_info->ctry_code = chan_list_event_hdr->country_id;
4954     reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
4955     if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
4956         reg_info->status_code = REG_SET_CC_STATUS_PASS;
4957     else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
4958         reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4959     else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
4960         reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4961     else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
4962         reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4963     else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
4964         reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4965     else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
4966         reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4967 
4968     reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
4969     reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
4970     reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
4971     reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;
4972 
4973     num_2g_reg_rules = reg_info->num_2g_reg_rules;
4974     num_5g_reg_rules = reg_info->num_5g_reg_rules;
4975 
4976     ath11k_dbg(ab, ATH11K_DBG_WMI,
4977            "%s:cc %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
4978            __func__, reg_info->alpha2, reg_info->dfs_region,
4979            reg_info->min_bw_2g, reg_info->max_bw_2g,
4980            reg_info->min_bw_5g, reg_info->max_bw_5g);
4981 
4982     ath11k_dbg(ab, ATH11K_DBG_WMI,
4983            "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
4984            num_2g_reg_rules, num_5g_reg_rules);
4985 
4986     wmi_reg_rule =
4987         (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
4988                         + sizeof(*chan_list_event_hdr)
4989                         + sizeof(struct wmi_tlv));
4990 
4991     if (num_2g_reg_rules) {
4992         reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
4993                                        wmi_reg_rule);
4994         if (!reg_info->reg_rules_2g_ptr) {
4995             kfree(tb);
4996             ath11k_warn(ab, "unable to allocate memory for 2g rules\n");
4997             return -ENOMEM;
4998         }
4999     }
5000 
5001     if (num_5g_reg_rules) {
5002         wmi_reg_rule += num_2g_reg_rules;
5003         reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
5004                                        wmi_reg_rule);
5005         if (!reg_info->reg_rules_5g_ptr) {
5006             kfree(tb);
5007             ath11k_warn(ab, "unable to allocate memory for 5g rules\n");
5008             return -ENOMEM;
5009         }
5010     }
5011 
5012     ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
5013 
5014     kfree(tb);
5015     return 0;
5016 }
5017 
5018 static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
5019                     struct wmi_peer_delete_resp_event *peer_del_resp)
5020 {
5021     const void **tb;
5022     const struct wmi_peer_delete_resp_event *ev;
5023     int ret;
5024 
5025     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5026     if (IS_ERR(tb)) {
5027         ret = PTR_ERR(tb);
5028         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5029         return ret;
5030     }
5031 
5032     ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
5033     if (!ev) {
5034         ath11k_warn(ab, "failed to fetch peer delete resp ev");
5035         kfree(tb);
5036         return -EPROTO;
5037     }
5038 
5039     memset(peer_del_resp, 0, sizeof(*peer_del_resp));
5040 
5041     peer_del_resp->vdev_id = ev->vdev_id;
5042     ether_addr_copy(peer_del_resp->peer_macaddr.addr,
5043             ev->peer_macaddr.addr);
5044 
5045     kfree(tb);
5046     return 0;
5047 }
5048 
5049 static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
5050                     struct sk_buff *skb,
5051                     u32 *vdev_id)
5052 {
5053     const void **tb;
5054     const struct wmi_vdev_delete_resp_event *ev;
5055     int ret;
5056 
5057     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5058     if (IS_ERR(tb)) {
5059         ret = PTR_ERR(tb);
5060         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5061         return ret;
5062     }
5063 
5064     ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
5065     if (!ev) {
5066         ath11k_warn(ab, "failed to fetch vdev delete resp ev");
5067         kfree(tb);
5068         return -EPROTO;
5069     }
5070 
5071     *vdev_id = ev->vdev_id;
5072 
5073     kfree(tb);
5074     return 0;
5075 }
5076 
5077 static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
5078                     u32 len, u32 *vdev_id,
5079                     u32 *tx_status)
5080 {
5081     const void **tb;
5082     const struct wmi_bcn_tx_status_event *ev;
5083     int ret;
5084 
5085     tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
5086     if (IS_ERR(tb)) {
5087         ret = PTR_ERR(tb);
5088         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5089         return ret;
5090     }
5091 
5092     ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
5093     if (!ev) {
5094         ath11k_warn(ab, "failed to fetch bcn tx status ev");
5095         kfree(tb);
5096         return -EPROTO;
5097     }
5098 
5099     *vdev_id   = ev->vdev_id;
5100     *tx_status = ev->tx_status;
5101 
5102     kfree(tb);
5103     return 0;
5104 }
5105 
5106 static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
5107                           u32 *vdev_id)
5108 {
5109     const void **tb;
5110     const struct wmi_vdev_stopped_event *ev;
5111     int ret;
5112 
5113     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5114     if (IS_ERR(tb)) {
5115         ret = PTR_ERR(tb);
5116         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5117         return ret;
5118     }
5119 
5120     ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
5121     if (!ev) {
5122         ath11k_warn(ab, "failed to fetch vdev stop ev");
5123         kfree(tb);
5124         return -EPROTO;
5125     }
5126 
5127     *vdev_id = ev->vdev_id;
5128 
5129     kfree(tb);
5130     return 0;
5131 }
5132 
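/*
 * A management rx event must carry both the WMI_TAG_MGMT_RX_HDR TLV and a
 * WMI_TAG_ARRAY_BYTE TLV holding the frame itself.  After the header fields
 * are copied out and the frame length is validated against the skb, the skb
 * is reshaped (trim/put/pull) so that skb->data points at the frame and
 * skb->len equals hdr->buf_len, and ath11k_ce_byte_swap() is applied to the
 * payload.
 */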
5133 static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
5134                       struct sk_buff *skb,
5135                       struct mgmt_rx_event_params *hdr)
5136 {
5137     const void **tb;
5138     const struct wmi_mgmt_rx_hdr *ev;
5139     const u8 *frame;
5140     int ret;
5141 
5142     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5143     if (IS_ERR(tb)) {
5144         ret = PTR_ERR(tb);
5145         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5146         return ret;
5147     }
5148 
5149     ev = tb[WMI_TAG_MGMT_RX_HDR];
5150     frame = tb[WMI_TAG_ARRAY_BYTE];
5151 
5152     if (!ev || !frame) {
5153         ath11k_warn(ab, "failed to fetch mgmt rx hdr");
5154         kfree(tb);
5155         return -EPROTO;
5156     }
5157 
5158     hdr->pdev_id = ev->pdev_id;
5159     hdr->chan_freq = ev->chan_freq;
5160     hdr->channel = ev->channel;
5161     hdr->snr = ev->snr;
5162     hdr->rate = ev->rate;
5163     hdr->phy_mode = ev->phy_mode;
5164     hdr->buf_len = ev->buf_len;
5165     hdr->status = ev->status;
5166     hdr->flags = ev->flags;
5167     hdr->rssi = ev->rssi;
5168     hdr->tsf_delta = ev->tsf_delta;
5169     memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
5170 
5171     if (skb->len < (frame - skb->data) + hdr->buf_len) {
5172         ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
5173         kfree(tb);
5174         return -EPROTO;
5175     }
5176 
5177     /* shift the sk_buff to point to `frame` */
5178     skb_trim(skb, 0);
5179     skb_put(skb, frame - skb->data);
5180     skb_pull(skb, frame - skb->data);
5181     skb_put(skb, hdr->buf_len);
5182 
5183     ath11k_ce_byte_swap(skb->data, hdr->buf_len);
5184 
5185     kfree(tb);
5186     return 0;
5187 }
5188 
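/*
 * Completes a host-initiated management frame: the msdu is looked up (and
 * removed) from the txmgmt IDR by descriptor id, its DMA mapping is undone,
 * ACK status is reported to mac80211 and the pending-mgmt counter is
 * decremented, waking ar->txmgmt_empty_waitq once it reaches zero.
 */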
5189 static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
5190                     u32 status)
5191 {
5192     struct sk_buff *msdu;
5193     struct ieee80211_tx_info *info;
5194     struct ath11k_skb_cb *skb_cb;
5195     int num_mgmt;
5196 
5197     spin_lock_bh(&ar->txmgmt_idr_lock);
5198     msdu = idr_find(&ar->txmgmt_idr, desc_id);
5199 
5200     if (!msdu) {
5201         ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
5202                 desc_id);
5203         spin_unlock_bh(&ar->txmgmt_idr_lock);
5204         return -ENOENT;
5205     }
5206 
5207     idr_remove(&ar->txmgmt_idr, desc_id);
5208     spin_unlock_bh(&ar->txmgmt_idr_lock);
5209 
5210     skb_cb = ATH11K_SKB_CB(msdu);
5211     dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
5212 
5213     info = IEEE80211_SKB_CB(msdu);
5214     if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
5215         info->flags |= IEEE80211_TX_STAT_ACK;
5216 
5217     ieee80211_tx_status_irqsafe(ar->hw, msdu);
5218 
5219     num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5220 
5221     /* WARN if this event arrives without any mgmt tx pending */
5222     if (num_mgmt < 0)
5223         WARN_ON_ONCE(1);
5224 
5225     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
5226            "wmi mgmt tx comp pending %d desc id %d\n",
5227            num_mgmt, desc_id);
5228 
5229     if (!num_mgmt)
5230         wake_up(&ar->txmgmt_empty_waitq);
5231 
5232     return 0;
5233 }
5234 
5235 static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
5236                            struct sk_buff *skb,
5237                            struct wmi_mgmt_tx_compl_event *param)
5238 {
5239     const void **tb;
5240     const struct wmi_mgmt_tx_compl_event *ev;
5241     int ret;
5242 
5243     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5244     if (IS_ERR(tb)) {
5245         ret = PTR_ERR(tb);
5246         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5247         return ret;
5248     }
5249 
5250     ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
5251     if (!ev) {
5252         ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
5253         kfree(tb);
5254         return -EPROTO;
5255     }
5256 
5257     param->pdev_id = ev->pdev_id;
5258     param->desc_id = ev->desc_id;
5259     param->status = ev->status;
5260 
5261     kfree(tb);
5262     return 0;
5263 }
5264 
5265 static void ath11k_wmi_event_scan_started(struct ath11k *ar)
5266 {
5267     lockdep_assert_held(&ar->data_lock);
5268 
5269     switch (ar->scan.state) {
5270     case ATH11K_SCAN_IDLE:
5271     case ATH11K_SCAN_RUNNING:
5272     case ATH11K_SCAN_ABORTING:
5273         ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
5274                 ath11k_scan_state_str(ar->scan.state),
5275                 ar->scan.state);
5276         break;
5277     case ATH11K_SCAN_STARTING:
5278         ar->scan.state = ATH11K_SCAN_RUNNING;
5279         if (ar->scan.is_roc)
5280             ieee80211_ready_on_channel(ar->hw);
5281         complete(&ar->scan.started);
5282         break;
5283     }
5284 }
5285 
5286 static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
5287 {
5288     lockdep_assert_held(&ar->data_lock);
5289 
5290     switch (ar->scan.state) {
5291     case ATH11K_SCAN_IDLE:
5292     case ATH11K_SCAN_RUNNING:
5293     case ATH11K_SCAN_ABORTING:
5294         ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
5295                 ath11k_scan_state_str(ar->scan.state),
5296                 ar->scan.state);
5297         break;
5298     case ATH11K_SCAN_STARTING:
5299         complete(&ar->scan.started);
5300         __ath11k_mac_scan_finish(ar);
5301         break;
5302     }
5303 }
5304 
5305 static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
5306 {
5307     lockdep_assert_held(&ar->data_lock);
5308 
5309     switch (ar->scan.state) {
5310     case ATH11K_SCAN_IDLE:
5311     case ATH11K_SCAN_STARTING:
5312         /* One suspected reason scan can be completed while starting is
5313          * if firmware fails to deliver all scan events to the host,
5314          * e.g. when transport pipe is full. This has been observed
5315          * with spectral scan phyerr events starving wmi transport
5316          * pipe. In such a case the "scan completed" event should be (and
5317          * is) ignored by the host, as it may just be the firmware's scan
5318          * state machine recovering.
5319          */
5320         ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
5321                 ath11k_scan_state_str(ar->scan.state),
5322                 ar->scan.state);
5323         break;
5324     case ATH11K_SCAN_RUNNING:
5325     case ATH11K_SCAN_ABORTING:
5326         __ath11k_mac_scan_finish(ar);
5327         break;
5328     }
5329 }
5330 
5331 static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
5332 {
5333     lockdep_assert_held(&ar->data_lock);
5334 
5335     switch (ar->scan.state) {
5336     case ATH11K_SCAN_IDLE:
5337     case ATH11K_SCAN_STARTING:
5338         ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
5339                 ath11k_scan_state_str(ar->scan.state),
5340                 ar->scan.state);
5341         break;
5342     case ATH11K_SCAN_RUNNING:
5343     case ATH11K_SCAN_ABORTING:
5344         ar->scan_channel = NULL;
5345         break;
5346     }
5347 }
5348 
5349 static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
5350 {
5351     lockdep_assert_held(&ar->data_lock);
5352 
5353     switch (ar->scan.state) {
5354     case ATH11K_SCAN_IDLE:
5355     case ATH11K_SCAN_STARTING:
5356         ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
5357                 ath11k_scan_state_str(ar->scan.state),
5358                 ar->scan.state);
5359         break;
5360     case ATH11K_SCAN_RUNNING:
5361     case ATH11K_SCAN_ABORTING:
5362         ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
5363         if (ar->scan.is_roc && ar->scan.roc_freq == freq)
5364             complete(&ar->scan.on_channel);
5365         break;
5366     }
5367 }
5368 
5369 static const char *
5370 ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
5371                    enum wmi_scan_completion_reason reason)
5372 {
5373     switch (type) {
5374     case WMI_SCAN_EVENT_STARTED:
5375         return "started";
5376     case WMI_SCAN_EVENT_COMPLETED:
5377         switch (reason) {
5378         case WMI_SCAN_REASON_COMPLETED:
5379             return "completed";
5380         case WMI_SCAN_REASON_CANCELLED:
5381             return "completed [cancelled]";
5382         case WMI_SCAN_REASON_PREEMPTED:
5383             return "completed [preempted]";
5384         case WMI_SCAN_REASON_TIMEDOUT:
5385             return "completed [timedout]";
5386         case WMI_SCAN_REASON_INTERNAL_FAILURE:
5387             return "completed [internal err]";
5388         case WMI_SCAN_REASON_MAX:
5389             break;
5390         }
5391         return "completed [unknown]";
5392     case WMI_SCAN_EVENT_BSS_CHANNEL:
5393         return "bss channel";
5394     case WMI_SCAN_EVENT_FOREIGN_CHAN:
5395         return "foreign channel";
5396     case WMI_SCAN_EVENT_DEQUEUED:
5397         return "dequeued";
5398     case WMI_SCAN_EVENT_PREEMPTED:
5399         return "preempted";
5400     case WMI_SCAN_EVENT_START_FAILED:
5401         return "start failed";
5402     case WMI_SCAN_EVENT_RESTARTED:
5403         return "restarted";
5404     case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5405         return "foreign channel exit";
5406     default:
5407         return "unknown";
5408     }
5409 }
5410 
5411 static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
5412                    struct wmi_scan_event *scan_evt_param)
5413 {
5414     const void **tb;
5415     const struct wmi_scan_event *ev;
5416     int ret;
5417 
5418     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5419     if (IS_ERR(tb)) {
5420         ret = PTR_ERR(tb);
5421         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5422         return ret;
5423     }
5424 
5425     ev = tb[WMI_TAG_SCAN_EVENT];
5426     if (!ev) {
5427         ath11k_warn(ab, "failed to fetch scan ev");
5428         kfree(tb);
5429         return -EPROTO;
5430     }
5431 
5432     scan_evt_param->event_type = ev->event_type;
5433     scan_evt_param->reason = ev->reason;
5434     scan_evt_param->channel_freq = ev->channel_freq;
5435     scan_evt_param->scan_req_id = ev->scan_req_id;
5436     scan_evt_param->scan_id = ev->scan_id;
5437     scan_evt_param->vdev_id = ev->vdev_id;
5438     scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
5439 
5440     kfree(tb);
5441     return 0;
5442 }
5443 
5444 static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
5445                        struct wmi_peer_sta_kickout_arg *arg)
5446 {
5447     const void **tb;
5448     const struct wmi_peer_sta_kickout_event *ev;
5449     int ret;
5450 
5451     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5452     if (IS_ERR(tb)) {
5453         ret = PTR_ERR(tb);
5454         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5455         return ret;
5456     }
5457 
5458     ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
5459     if (!ev) {
5460         ath11k_warn(ab, "failed to fetch peer sta kickout ev");
5461         kfree(tb);
5462         return -EPROTO;
5463     }
5464 
5465     arg->mac_addr = ev->peer_macaddr.addr;
5466 
5467     kfree(tb);
5468     return 0;
5469 }
5470 
5471 static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
5472                    struct wmi_roam_event *roam_ev)
5473 {
5474     const void **tb;
5475     const struct wmi_roam_event *ev;
5476     int ret;
5477 
5478     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5479     if (IS_ERR(tb)) {
5480         ret = PTR_ERR(tb);
5481         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5482         return ret;
5483     }
5484 
5485     ev = tb[WMI_TAG_ROAM_EVENT];
5486     if (!ev) {
5487         ath11k_warn(ab, "failed to fetch roam ev");
5488         kfree(tb);
5489         return -EPROTO;
5490     }
5491 
5492     roam_ev->vdev_id = ev->vdev_id;
5493     roam_ev->reason = ev->reason;
5494     roam_ev->rssi = ev->rssi;
5495 
5496     kfree(tb);
5497     return 0;
5498 }
5499 
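/*
 * Maps a center frequency to its position in the flattened list of all
 * channels registered across the enabled wiphy bands; an unknown frequency
 * falls through and yields the total channel count.  Presumably used by
 * callers to index per-channel bookkeeping such as survey data.
 */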
5500 static int freq_to_idx(struct ath11k *ar, int freq)
5501 {
5502     struct ieee80211_supported_band *sband;
5503     int band, ch, idx = 0;
5504 
5505     for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
5506         sband = ar->hw->wiphy->bands[band];
5507         if (!sband)
5508             continue;
5509 
5510         for (ch = 0; ch < sband->n_channels; ch++, idx++)
5511             if (sband->channels[ch].center_freq == freq)
5512                 goto exit;
5513     }
5514 
5515 exit:
5516     return idx;
5517 }
5518 
5519 static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
5520                     u32 len, struct wmi_chan_info_event *ch_info_ev)
5521 {
5522     const void **tb;
5523     const struct wmi_chan_info_event *ev;
5524     int ret;
5525 
5526     tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
5527     if (IS_ERR(tb)) {
5528         ret = PTR_ERR(tb);
5529         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5530         return ret;
5531     }
5532 
5533     ev = tb[WMI_TAG_CHAN_INFO_EVENT];
5534     if (!ev) {
5535         ath11k_warn(ab, "failed to fetch chan info ev");
5536         kfree(tb);
5537         return -EPROTO;
5538     }
5539 
5540     ch_info_ev->err_code = ev->err_code;
5541     ch_info_ev->freq = ev->freq;
5542     ch_info_ev->cmd_flags = ev->cmd_flags;
5543     ch_info_ev->noise_floor = ev->noise_floor;
5544     ch_info_ev->rx_clear_count = ev->rx_clear_count;
5545     ch_info_ev->cycle_count = ev->cycle_count;
5546     ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
5547     ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
5548     ch_info_ev->rx_frame_count = ev->rx_frame_count;
5549     ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
5550     ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
5551     ch_info_ev->vdev_id = ev->vdev_id;
5552 
5553     kfree(tb);
5554     return 0;
5555 }
5556 
5557 static int
5558 ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
5559                   struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
5560 {
5561     const void **tb;
5562     const struct wmi_pdev_bss_chan_info_event *ev;
5563     int ret;
5564 
5565     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5566     if (IS_ERR(tb)) {
5567         ret = PTR_ERR(tb);
5568         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5569         return ret;
5570     }
5571 
5572     ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5573     if (!ev) {
5574         ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
5575         kfree(tb);
5576         return -EPROTO;
5577     }
5578 
5579     bss_ch_info_ev->pdev_id = ev->pdev_id;
5580     bss_ch_info_ev->freq = ev->freq;
5581     bss_ch_info_ev->noise_floor = ev->noise_floor;
5582     bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5583     bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5584     bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5585     bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5586     bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5587     bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5588     bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5589     bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5590     bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5591     bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5592 
5593     kfree(tb);
5594     return 0;
5595 }
5596 
5597 static int
5598 ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
5599                       struct wmi_vdev_install_key_complete_arg *arg)
5600 {
5601     const void **tb;
5602     const struct wmi_vdev_install_key_compl_event *ev;
5603     int ret;
5604 
5605     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5606     if (IS_ERR(tb)) {
5607         ret = PTR_ERR(tb);
5608         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5609         return ret;
5610     }
5611 
5612     ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5613     if (!ev) {
5614         ath11k_warn(ab, "failed to fetch vdev install key compl ev");
5615         kfree(tb);
5616         return -EPROTO;
5617     }
5618 
5619     arg->vdev_id = ev->vdev_id;
5620     arg->macaddr = ev->peer_macaddr.addr;
5621     arg->key_idx = ev->key_idx;
5622     arg->key_flags = ev->key_flags;
5623     arg->status = ev->status;
5624 
5625     kfree(tb);
5626     return 0;
5627 }
5628 
5629 static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
5630                       struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5631 {
5632     const void **tb;
5633     const struct wmi_peer_assoc_conf_event *ev;
5634     int ret;
5635 
5636     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5637     if (IS_ERR(tb)) {
5638         ret = PTR_ERR(tb);
5639         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
5640         return ret;
5641     }
5642 
5643     ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5644     if (!ev) {
5645         ath11k_warn(ab, "failed to fetch peer assoc conf ev");
5646         kfree(tb);
5647         return -EPROTO;
5648     }
5649 
5650     peer_assoc_conf->vdev_id = ev->vdev_id;
5651     peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5652 
5653     kfree(tb);
5654     return 0;
5655 }
5656 
5657 static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
5658                         struct ath11k_fw_stats_pdev *dst)
5659 {
5660     dst->ch_noise_floor = src->chan_nf;
5661     dst->tx_frame_count = src->tx_frame_count;
5662     dst->rx_frame_count = src->rx_frame_count;
5663     dst->rx_clear_count = src->rx_clear_count;
5664     dst->cycle_count = src->cycle_count;
5665     dst->phy_err_count = src->phy_err_count;
5666     dst->chan_tx_power = src->chan_tx_pwr;
5667 }
5668 
5669 static void
5670 ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
5671                   struct ath11k_fw_stats_pdev *dst)
5672 {
5673     dst->comp_queued = src->comp_queued;
5674     dst->comp_delivered = src->comp_delivered;
5675     dst->msdu_enqued = src->msdu_enqued;
5676     dst->mpdu_enqued = src->mpdu_enqued;
5677     dst->wmm_drop = src->wmm_drop;
5678     dst->local_enqued = src->local_enqued;
5679     dst->local_freed = src->local_freed;
5680     dst->hw_queued = src->hw_queued;
5681     dst->hw_reaped = src->hw_reaped;
5682     dst->underrun = src->underrun;
5683     dst->hw_paused = src->hw_paused;
5684     dst->tx_abort = src->tx_abort;
5685     dst->mpdus_requeued = src->mpdus_requeued;
5686     dst->tx_ko = src->tx_ko;
5687     dst->tx_xretry = src->tx_xretry;
5688     dst->data_rc = src->data_rc;
5689     dst->self_triggers = src->self_triggers;
5690     dst->sw_retry_failure = src->sw_retry_failure;
5691     dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
5692     dst->pdev_cont_xretry = src->pdev_cont_xretry;
5693     dst->pdev_tx_timeout = src->pdev_tx_timeout;
5694     dst->pdev_resets = src->pdev_resets;
5695     dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
5696     dst->phy_underrun = src->phy_underrun;
5697     dst->txop_ovf = src->txop_ovf;
5698     dst->seq_posted = src->seq_posted;
5699     dst->seq_failed_queueing = src->seq_failed_queueing;
5700     dst->seq_completed = src->seq_completed;
5701     dst->seq_restarted = src->seq_restarted;
5702     dst->mu_seq_posted = src->mu_seq_posted;
5703     dst->mpdus_sw_flush = src->mpdus_sw_flush;
5704     dst->mpdus_hw_filter = src->mpdus_hw_filter;
5705     dst->mpdus_truncated = src->mpdus_truncated;
5706     dst->mpdus_ack_failed = src->mpdus_ack_failed;
5707     dst->mpdus_expired = src->mpdus_expired;
5708 }
5709 
5710 static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
5711                       struct ath11k_fw_stats_pdev *dst)
5712 {
5713     dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
5714     dst->status_rcvd = src->status_rcvd;
5715     dst->r0_frags = src->r0_frags;
5716     dst->r1_frags = src->r1_frags;
5717     dst->r2_frags = src->r2_frags;
5718     dst->r3_frags = src->r3_frags;
5719     dst->htt_msdus = src->htt_msdus;
5720     dst->htt_mpdus = src->htt_mpdus;
5721     dst->loc_msdus = src->loc_msdus;
5722     dst->loc_mpdus = src->loc_mpdus;
5723     dst->oversize_amsdu = src->oversize_amsdu;
5724     dst->phy_errs = src->phy_errs;
5725     dst->phy_err_drop = src->phy_err_drop;
5726     dst->mpdu_errs = src->mpdu_errs;
5727     dst->rx_ovfl_errs = src->rx_ovfl_errs;
5728 }
5729 
5730 static void
5731 ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
5732                struct ath11k_fw_stats_vdev *dst)
5733 {
5734     int i;
5735 
5736     dst->vdev_id = src->vdev_id;
5737     dst->beacon_snr = src->beacon_snr;
5738     dst->data_snr = src->data_snr;
5739     dst->num_rx_frames = src->num_rx_frames;
5740     dst->num_rts_fail = src->num_rts_fail;
5741     dst->num_rts_success = src->num_rts_success;
5742     dst->num_rx_err = src->num_rx_err;
5743     dst->num_rx_discard = src->num_rx_discard;
5744     dst->num_tx_not_acked = src->num_tx_not_acked;
5745 
5746     for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
5747         dst->num_tx_frames[i] = src->num_tx_frames[i];
5748 
5749     for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
5750         dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
5751 
5752     for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
5753         dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
5754 
5755     for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
5756         dst->tx_rate_history[i] = src->tx_rate_history[i];
5757 
5758     for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
5759         dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
5760 }
5761 
5762 static void
5763 ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
5764               struct ath11k_fw_stats_bcn *dst)
5765 {
5766     dst->vdev_id = src->vdev_id;
5767     dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
5768     dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
5769 }
5770 
5771 static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
5772                        u16 tag, u16 len,
5773                        const void *ptr, void *data)
5774 {
5775     struct wmi_tlv_fw_stats_parse *parse = data;
5776     const struct wmi_stats_event *ev = parse->ev;
5777     struct ath11k_fw_stats *stats = parse->stats;
5778     struct ath11k *ar;
5779     struct ath11k_vif *arvif;
5780     struct ieee80211_sta *sta;
5781     struct ath11k_sta *arsta;
5782     const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
5783     int j, ret = 0;
5784 
5785     if (tag != WMI_TAG_RSSI_STATS)
5786         return -EPROTO;
5787 
5788     rcu_read_lock();
5789 
5790     ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
5791     stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
5792 
5793     ath11k_dbg(ab, ATH11K_DBG_WMI,
5794            "wmi stats vdev id %d mac %pM\n",
5795            stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
5796 
5797     arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
5798     if (!arvif) {
5799         ath11k_warn(ab, "failed to find vif for vdev id %d\n",
5800                 stats_rssi->vdev_id);
5801         ret = -EPROTO;
5802         goto exit;
5803     }
5804 
5805     ath11k_dbg(ab, ATH11K_DBG_WMI,
5806            "wmi stats bssid %pM vif %pK\n",
5807            arvif->bssid, arvif->vif);
5808 
5809     sta = ieee80211_find_sta_by_ifaddr(ar->hw,
5810                        arvif->bssid,
5811                        NULL);
5812     if (!sta) {
5813         ath11k_dbg(ab, ATH11K_DBG_WMI,
5814                "failed to find station with bssid %pM for rssi chain\n",
5815                arvif->bssid);
5816         goto exit;
5817     }
5818 
5819     arsta = (struct ath11k_sta *)sta->drv_priv;
5820 
5821     BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
5822              ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
5823 
5824     for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
5825         arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
5826         ath11k_dbg(ab, ATH11K_DBG_WMI,
5827                "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n",
5828                j,
5829                stats_rssi->rssi_avg_beacon[j],
5830                j,
5831                stats_rssi->rssi_avg_data[j]);
5832     }
5833 
5834 exit:
5835     rcu_read_unlock();
5836     return ret;
5837 }
5838 
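/*
 * The WMI_TAG_ARRAY_BYTE payload of a stats event is consumed sequentially:
 * ev->num_pdev_stats pdev blocks, then ev->num_vdev_stats vdev blocks, then
 * ev->num_bcn_stats beacon blocks.  Each block is length-checked, copied
 * into a freshly allocated node and appended to the matching list in
 * struct ath11k_fw_stats; vdev blocks additionally refresh the station's
 * cached beacon RSSI when the corresponding vif and sta can be found.
 */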
5839 static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
5840                           struct wmi_tlv_fw_stats_parse *parse,
5841                           const void *ptr,
5842                           u16 len)
5843 {
5844     struct ath11k_fw_stats *stats = parse->stats;
5845     const struct wmi_stats_event *ev = parse->ev;
5846     struct ath11k *ar;
5847     struct ath11k_vif *arvif;
5848     struct ieee80211_sta *sta;
5849     struct ath11k_sta *arsta;
5850     int i, ret = 0;
5851     const void *data = ptr;
5852 
5853     if (!ev) {
5854         ath11k_warn(ab, "failed to fetch update stats ev");
5855         return -EPROTO;
5856     }
5857 
5858     stats->stats_id = 0;
5859 
5860     rcu_read_lock();
5861 
5862     ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
5863 
5864     for (i = 0; i < ev->num_pdev_stats; i++) {
5865         const struct wmi_pdev_stats *src;
5866         struct ath11k_fw_stats_pdev *dst;
5867 
5868         src = data;
5869         if (len < sizeof(*src)) {
5870             ret = -EPROTO;
5871             goto exit;
5872         }
5873 
5874         stats->stats_id = WMI_REQUEST_PDEV_STAT;
5875 
5876         data += sizeof(*src);
5877         len -= sizeof(*src);
5878 
5879         dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
5880         if (!dst)
5881             continue;
5882 
5883         ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
5884         ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
5885         ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
5886         list_add_tail(&dst->list, &stats->pdevs);
5887     }
5888 
5889     for (i = 0; i < ev->num_vdev_stats; i++) {
5890         const struct wmi_vdev_stats *src;
5891         struct ath11k_fw_stats_vdev *dst;
5892 
5893         src = data;
5894         if (len < sizeof(*src)) {
5895             ret = -EPROTO;
5896             goto exit;
5897         }
5898 
5899         stats->stats_id = WMI_REQUEST_VDEV_STAT;
5900 
5901         arvif = ath11k_mac_get_arvif(ar, src->vdev_id);
5902         if (arvif) {
5903             sta = ieee80211_find_sta_by_ifaddr(ar->hw,
5904                                arvif->bssid,
5905                                NULL);
5906             if (sta) {
5907                 arsta = (struct ath11k_sta *)sta->drv_priv;
5908                 arsta->rssi_beacon = src->beacon_snr;
5909                 ath11k_dbg(ab, ATH11K_DBG_WMI,
5910                        "wmi stats vdev id %d snr %d\n",
5911                        src->vdev_id, src->beacon_snr);
5912             } else {
5913                 ath11k_dbg(ab, ATH11K_DBG_WMI,
5914                        "failed to find station with bssid %pM for vdev stat\n",
5915                        arvif->bssid);
5916             }
5917         }
5918 
5919         data += sizeof(*src);
5920         len -= sizeof(*src);
5921 
5922         dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
5923         if (!dst)
5924             continue;
5925 
5926         ath11k_wmi_pull_vdev_stats(src, dst);
5927         list_add_tail(&dst->list, &stats->vdevs);
5928     }
5929 
5930     for (i = 0; i < ev->num_bcn_stats; i++) {
5931         const struct wmi_bcn_stats *src;
5932         struct ath11k_fw_stats_bcn *dst;
5933 
5934         src = data;
5935         if (len < sizeof(*src)) {
5936             ret = -EPROTO;
5937             goto exit;
5938         }
5939 
5940         stats->stats_id = WMI_REQUEST_BCN_STAT;
5941 
5942         data += sizeof(*src);
5943         len -= sizeof(*src);
5944 
5945         dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
5946         if (!dst)
5947             continue;
5948 
5949         ath11k_wmi_pull_bcn_stats(src, dst);
5950         list_add_tail(&dst->list, &stats->bcn);
5951     }
5952 
5953 exit:
5954     rcu_read_unlock();
5955     return ret;
5956 }
5957 
5958 static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
5959                      u16 tag, u16 len,
5960                      const void *ptr, void *data)
5961 {
5962     struct wmi_tlv_fw_stats_parse *parse = data;
5963     int ret = 0;
5964 
5965     switch (tag) {
5966     case WMI_TAG_STATS_EVENT:
5967         parse->ev = (struct wmi_stats_event *)ptr;
5968         parse->stats->pdev_id = parse->ev->pdev_id;
5969         break;
5970     case WMI_TAG_ARRAY_BYTE:
5971         ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
5972         break;
5973     case WMI_TAG_PER_CHAIN_RSSI_STATS:
5974         parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
5975 
5976         if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
5977             parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
5978 
5979         ath11k_dbg(ab, ATH11K_DBG_WMI,
5980                "wmi stats id 0x%x num chain %d\n",
5981                parse->ev->stats_id,
5982                parse->rssi_num);
5983         break;
5984     case WMI_TAG_ARRAY_STRUCT:
5985         if (parse->rssi_num && !parse->chain_rssi_done) {
5986             ret = ath11k_wmi_tlv_iter(ab, ptr, len,
5987                           ath11k_wmi_tlv_rssi_chain_parse,
5988                           parse);
5989             if (ret) {
5990                 ath11k_warn(ab, "failed to parse rssi chain %d\n",
5991                         ret);
5992                 return ret;
5993             }
5994             parse->chain_rssi_done = true;
5995         }
5996         break;
5997     default:
5998         break;
5999     }
6000     return ret;
6001 }
6002 
6003 int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
6004                  struct ath11k_fw_stats *stats)
6005 {
6006     struct wmi_tlv_fw_stats_parse parse = { };
6007 
6008     stats->stats_id = 0;
6009     parse.stats = stats;
6010 
6011     return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
6012                    ath11k_wmi_tlv_fw_stats_parse,
6013                    &parse);
6014 }
6015 
6016 size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
6017 {
6018     struct ath11k_fw_stats_vdev *i;
6019     size_t num = 0;
6020 
6021     list_for_each_entry(i, head, list)
6022         ++num;
6023 
6024     return num;
6025 }
6026 
6027 static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
6028 {
6029     struct ath11k_fw_stats_bcn *i;
6030     size_t num = 0;
6031 
6032     list_for_each_entry(i, head, list)
6033         ++num;
6034 
6035     return num;
6036 }
6037 
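/*
 * The *_stats_fill() helpers below render the collected firmware stats into
 * a caller-supplied buffer assumed to be ATH11K_FW_STATS_BUF_SIZE bytes.
 * Each call appends with scnprintf() and advances *length, so the helpers
 * can be chained to build a single text report.
 */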
6038 static void
6039 ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
6040                    char *buf, u32 *length)
6041 {
6042     u32 len = *length;
6043     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6044 
6045     len += scnprintf(buf + len, buf_len - len, "\n");
6046     len += scnprintf(buf + len, buf_len - len, "%30s\n",
6047             "ath11k PDEV stats");
6048     len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6049             "=================");
6050 
6051     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6052             "Channel noise floor", pdev->ch_noise_floor);
6053     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6054             "Channel TX power", pdev->chan_tx_power);
6055     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6056             "TX frame count", pdev->tx_frame_count);
6057     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6058             "RX frame count", pdev->rx_frame_count);
6059     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6060             "RX clear count", pdev->rx_clear_count);
6061     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6062             "Cycle count", pdev->cycle_count);
6063     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6064             "PHY error count", pdev->phy_err_count);
6065 
6066     *length = len;
6067 }
6068 
6069 static void
6070 ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
6071                  char *buf, u32 *length)
6072 {
6073     u32 len = *length;
6074     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6075 
6076     len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
6077              "ath11k PDEV TX stats");
6078     len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6079              "====================");
6080 
6081     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6082              "HTT cookies queued", pdev->comp_queued);
6083     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6084              "HTT cookies disp.", pdev->comp_delivered);
6085     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6086              "MSDU queued", pdev->msdu_enqued);
6087     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6088              "MPDU queued", pdev->mpdu_enqued);
6089     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6090              "MSDUs dropped", pdev->wmm_drop);
6091     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6092              "Local enqued", pdev->local_enqued);
6093     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6094              "Local freed", pdev->local_freed);
6095     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6096              "HW queued", pdev->hw_queued);
6097     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6098              "PPDUs reaped", pdev->hw_reaped);
6099     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6100              "Num underruns", pdev->underrun);
6101     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6102              "Num HW Paused", pdev->hw_paused);
6103     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6104              "PPDUs cleaned", pdev->tx_abort);
6105     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6106              "MPDUs requeued", pdev->mpdus_requeued);
6107     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6108              "PPDU OK", pdev->tx_ko);
6109     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6110              "Excessive retries", pdev->tx_xretry);
6111     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6112              "HW rate", pdev->data_rc);
6113     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6114              "Sched self triggers", pdev->self_triggers);
6115     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6116              "Dropped due to SW retries",
6117              pdev->sw_retry_failure);
6118     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6119              "Illegal rate phy errors",
6120              pdev->illgl_rate_phy_err);
6121     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6122              "PDEV continuous xretry", pdev->pdev_cont_xretry);
6123     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6124              "TX timeout", pdev->pdev_tx_timeout);
6125     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6126              "PDEV resets", pdev->pdev_resets);
6127     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6128              "Stateless TIDs alloc failures",
6129              pdev->stateless_tid_alloc_failure);
6130     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6131              "PHY underrun", pdev->phy_underrun);
6132     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6133              "MPDU is more than txop limit", pdev->txop_ovf);
6134     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6135              "Num sequences posted", pdev->seq_posted);
6136     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6137              "Num seq failed queueing ", pdev->seq_failed_queueing);
6138     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6139              "Num sequences completed ", pdev->seq_completed);
6140     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6141              "Num sequences restarted ", pdev->seq_restarted);
6142     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6143              "Num of MU sequences posted ", pdev->mu_seq_posted);
6144     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6145              "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
6146     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6147              "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
6148     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6149              "Num of MPDUS truncated ", pdev->mpdus_truncated);
6150     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6151              "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
6152     len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
6153              "Num of MPDUS expired ", pdev->mpdus_expired);
6154     *length = len;
6155 }
6156 
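/* Append the firmware PDEV RX path counters (ring fragment counts, HTT/stack
 * delivery counts and PHY/MPDU error counters) to the debugfs stats buffer.
 */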
6157 static void
6158 ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
6159                  char *buf, u32 *length)
6160 {
6161     u32 len = *length;
6162     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6163 
6164     len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
6165              "ath11k PDEV RX stats");
6166     len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6167              "====================");
6168 
6169     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6170              "Mid PPDU route change",
6171              pdev->mid_ppdu_route_change);
6172     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6173              "Tot. number of statuses", pdev->status_rcvd);
6174     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6175              "Extra frags on rings 0", pdev->r0_frags);
6176     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6177              "Extra frags on rings 1", pdev->r1_frags);
6178     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6179              "Extra frags on rings 2", pdev->r2_frags);
6180     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6181              "Extra frags on rings 3", pdev->r3_frags);
6182     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6183              "MSDUs delivered to HTT", pdev->htt_msdus);
6184     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6185              "MPDUs delivered to HTT", pdev->htt_mpdus);
6186     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6187              "MSDUs delivered to stack", pdev->loc_msdus);
6188     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6189              "MPDUs delivered to stack", pdev->loc_mpdus);
6190     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6191              "Oversized AMSDUs", pdev->oversize_amsdu);
6192     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6193              "PHY errors", pdev->phy_errs);
6194     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6195              "PHY errors drops", pdev->phy_err_drop);
6196     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6197              "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
6198     len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
6199              "Overflow errors", pdev->rx_ovfl_errs);
6200     *length = len;
6201 }
6202 
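/* Append per-VDEV firmware stats. The vdev id is resolved to a local arvif
 * first so that VDEVs belonging to other PDEVs are silently skipped.
 */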
6203 static void
6204 ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
6205                   const struct ath11k_fw_stats_vdev *vdev,
6206                   char *buf, u32 *length)
6207 {
6208     u32 len = *length;
6209     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6210     struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
6211     u8 *vif_macaddr;
6212     int i;
6213 
6214     /* VDEV stats include all active VDEVs of other PDEVs as well;
6215      * skip those that are not part of the requested PDEV.
6216      */
6217     if (!arvif)
6218         return;
6219 
6220     vif_macaddr = arvif->vif->addr;
6221 
6222     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6223              "VDEV ID", vdev->vdev_id);
6224     len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
6225              "VDEV MAC address", vif_macaddr);
6226     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6227              "beacon snr", vdev->beacon_snr);
6228     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6229              "data snr", vdev->data_snr);
6230     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6231              "num rx frames", vdev->num_rx_frames);
6232     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6233              "num rts fail", vdev->num_rts_fail);
6234     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6235              "num rts success", vdev->num_rts_success);
6236     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6237              "num rx err", vdev->num_rx_err);
6238     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6239              "num rx discard", vdev->num_rx_discard);
6240     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6241              "num tx not acked", vdev->num_tx_not_acked);
6242 
6243     for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
6244         len += scnprintf(buf + len, buf_len - len,
6245                 "%25s [%02d] %u\n",
6246                 "num tx frames", i,
6247                 vdev->num_tx_frames[i]);
6248 
6249     for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
6250         len += scnprintf(buf + len, buf_len - len,
6251                 "%25s [%02d] %u\n",
6252                 "num tx frames retries", i,
6253                 vdev->num_tx_frames_retries[i]);
6254 
6255     for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
6256         len += scnprintf(buf + len, buf_len - len,
6257                 "%25s [%02d] %u\n",
6258                 "num tx frames failures", i,
6259                 vdev->num_tx_frames_failures[i]);
6260 
6261     for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
6262         len += scnprintf(buf + len, buf_len - len,
6263                 "%25s [%02d] 0x%08x\n",
6264                 "tx rate history", i,
6265                 vdev->tx_rate_history[i]);
6266 
6267     for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
6268         len += scnprintf(buf + len, buf_len - len,
6269                 "%25s [%02d] %u\n",
6270                 "beacon rssi history", i,
6271                 vdev->beacon_rssi_history[i]);
6272 
6273     len += scnprintf(buf + len, buf_len - len, "\n");
6274     *length = len;
6275 }
6276 
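/* Append per-VDEV beacon tx success/outage counters for the given arvif. */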
6277 static void
6278 ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
6279                  const struct ath11k_fw_stats_bcn *bcn,
6280                  char *buf, u32 *length)
6281 {
6282     u32 len = *length;
6283     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6284     struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
6285     u8 *vdev_macaddr;
6286 
6287     if (!arvif) {
6288         ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
6289                 bcn->vdev_id);
6290         return;
6291     }
6292 
6293     vdev_macaddr = arvif->vif->addr;
6294 
6295     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6296              "VDEV ID", bcn->vdev_id);
6297     len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
6298              "VDEV MAC address", vdev_macaddr);
6299     len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6300              "================");
6301     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6302              "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
6303     len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
6304              "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
6305 
6306     len += scnprintf(buf + len, buf_len - len, "\n");
6307     *length = len;
6308 }
6309 
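/* Top-level debugfs formatter: depending on stats_id, dump PDEV, VDEV or
 * beacon stats from the fw_stats lists into buf while holding ar->data_lock.
 */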
6310 void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
6311                   struct ath11k_fw_stats *fw_stats,
6312                   u32 stats_id, char *buf)
6313 {
6314     u32 len = 0;
6315     u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
6316     const struct ath11k_fw_stats_pdev *pdev;
6317     const struct ath11k_fw_stats_vdev *vdev;
6318     const struct ath11k_fw_stats_bcn *bcn;
6319     size_t num_bcn;
6320 
6321     spin_lock_bh(&ar->data_lock);
6322 
6323     if (stats_id == WMI_REQUEST_PDEV_STAT) {
6324         pdev = list_first_entry_or_null(&fw_stats->pdevs,
6325                         struct ath11k_fw_stats_pdev, list);
6326         if (!pdev) {
6327             ath11k_warn(ar->ab, "failed to get pdev stats\n");
6328             goto unlock;
6329         }
6330 
6331         ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
6332         ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
6333         ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
6334     }
6335 
6336     if (stats_id == WMI_REQUEST_VDEV_STAT) {
6337         len += scnprintf(buf + len, buf_len - len, "\n");
6338         len += scnprintf(buf + len, buf_len - len, "%30s\n",
6339                  "ath11k VDEV stats");
6340         len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6341                  "=================");
6342 
6343         list_for_each_entry(vdev, &fw_stats->vdevs, list)
6344             ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
6345     }
6346 
6347     if (stats_id == WMI_REQUEST_BCN_STAT) {
6348         num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);
6349 
6350         len += scnprintf(buf + len, buf_len - len, "\n");
6351         len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
6352                  "ath11k Beacon stats", num_bcn);
6353         len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
6354                  "===================");
6355 
6356         list_for_each_entry(bcn, &fw_stats->bcn, list)
6357             ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
6358     }
6359 
6360 unlock:
6361     spin_unlock_bh(&ar->data_lock);
6362 
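    /* NUL-terminate the output; if the stats filled the whole buffer,
     * overwrite the last byte instead of writing past the end.
     */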
6363     if (len >= buf_len)
6364         buf[len - 1] = 0;
6365     else
6366         buf[len] = 0;
6367 }
6368 
6369 static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
6370 {
6371     /* try to send pending beacons first. they take priority */
6372     wake_up(&ab->wmi_ab.tx_credits_wq);
6373 }
6374 
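/* Handle the 11d new country code event: cache the reported alpha2 under
 * base_lock, mark the per-radio 11d state idle, complete any pending 11d
 * scan waiters and schedule update_11d_work to apply the new country.
 */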
6375 static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
6376 {
6377     const struct wmi_11d_new_cc_ev *ev;
6378     struct ath11k *ar;
6379     struct ath11k_pdev *pdev;
6380     const void **tb;
6381     int ret, i;
6382 
6383     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6384     if (IS_ERR(tb)) {
6385         ret = PTR_ERR(tb);
6386         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
6387         return ret;
6388     }
6389 
6390     ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6391     if (!ev) {
6392         kfree(tb);
6393         ath11k_warn(ab, "failed to fetch 11d new cc ev");
6394         return -EPROTO;
6395     }
6396 
6397     spin_lock_bh(&ab->base_lock);
6398     memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
6399     spin_unlock_bh(&ab->base_lock);
6400 
6401     ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n",
6402            ab->new_alpha2[0],
6403            ab->new_alpha2[1]);
6404 
6405     kfree(tb);
6406 
6407     for (i = 0; i < ab->num_radios; i++) {
6408         pdev = &ab->pdevs[i];
6409         ar = pdev->ar;
6410         ar->state_11d = ATH11K_11D_IDLE;
6411         complete(&ar->completed_11d_scan);
6412     }
6413 
6414     queue_work(ab->workqueue, &ab->update_11d_work);
6415 
6416     return 0;
6417 }
6418 
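/* HTC tx-complete callback for WMI: free the completed skb and wake the
 * waiter on the per-pdev tx_ce_desc_wq that matches the HTC endpoint id.
 */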
6419 static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
6420                        struct sk_buff *skb)
6421 {
6422     struct ath11k_pdev_wmi *wmi = NULL;
6423     u32 i;
6424     u8 wmi_ep_count;
6425     u8 eid;
6426 
6427     eid = ATH11K_SKB_CB(skb)->eid;
6428     dev_kfree_skb(skb);
6429 
6430     if (eid >= ATH11K_HTC_EP_COUNT)
6431         return;
6432 
6433     wmi_ep_count = ab->htc.wmi_ep_count;
6434     if (wmi_ep_count > ab->hw_params.max_radios)
6435         return;
6436 
6437     for (i = 0; i < ab->htc.wmi_ep_count; i++) {
6438         if (ab->wmi_ab.wmi[i].eid == eid) {
6439             wmi = &ab->wmi_ab.wmi[i];
6440             break;
6441         }
6442     }
6443 
6444     if (wmi)
6445         wake_up(&wmi->tx_ce_desc_wq);
6446 }
6447 
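/* "00" and "na" are treated as world/default regulatory alpha2 codes. */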
6448 static bool ath11k_reg_is_world_alpha(char *alpha)
6449 {
6450     if (alpha[0] == '0' && alpha[1] == '0')
6451         return true;
6452 
6453     if (alpha[0] == 'n' && alpha[1] == 'a')
6454         return true;
6455 
6456     return false;
6457 }
6458 
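/* Process a regulatory channel list event: build an ieee80211_regdomain from
 * the event, optionally intersecting with the pdev's default regd when both
 * carry a specific country, then either store it as the default regd (first
 * event after WMI init) or queue regd_update_work to apply it.
 */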
6459 static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
6460 {
6461     struct cur_regulatory_info *reg_info = NULL;
6462     struct ieee80211_regdomain *regd = NULL;
6463     bool intersect = false;
6464     int ret = 0, pdev_idx;
6465     struct ath11k *ar;
6466 
6467     reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
6468     if (!reg_info) {
6469         ret = -ENOMEM;
6470         goto fallback;
6471     }
6472 
6473     ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
6474     if (ret) {
6475         ath11k_warn(ab, "failed to extract regulatory info from received event\n");
6476         goto fallback;
6477     }
6478 
6479     if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
6480         /* If setting the requested country fails, the fw retains
6481          * the current regd. Print a warning and return
6482          * from here.
6483          */
6484         ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
6485         goto mem_free;
6486     }
6487 
6488     pdev_idx = reg_info->phy_id;
6489 
6490     /* Avoid default reg rule updates sent during FW recovery if
6491      * a default regd is already available for this pdev.
6492      */
6493     spin_lock(&ab->base_lock);
6494     if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
6495         ab->default_regd[pdev_idx]) {
6496         spin_unlock(&ab->base_lock);
6497         goto mem_free;
6498     }
6499     spin_unlock(&ab->base_lock);
6500 
6501     if (pdev_idx >= ab->num_radios) {
6502         /* Process the event for phy0 only if single_pdev_only
6503          * is true. If pdev_idx is valid but not 0, discard the
6504          * event. Otherwise, it goes to fallback.
6505          */
6506         if (ab->hw_params.single_pdev_only &&
6507             pdev_idx < ab->hw_params.num_rxmda_per_pdev)
6508             goto mem_free;
6509         else
6510             goto fallback;
6511     }
6512 
6513     /* Avoid multiple overwrites of the default regd during core
6514      * stop-start after mac registration.
6515      */
6516     if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
6517         !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
6518             (char *)reg_info->alpha2, 2))
6519         goto mem_free;
6520 
6521     /* Intersect new rules with the default regd if a new country setting was
6522      * requested, i.e. a default regd was already set during initialization
6523      * and the regd coming from this event has valid country info.
6524      */
6525     if (ab->default_regd[pdev_idx] &&
6526         !ath11k_reg_is_world_alpha((char *)
6527         ab->default_regd[pdev_idx]->alpha2) &&
6528         !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
6529         intersect = true;
6530 
6531     regd = ath11k_reg_build_regd(ab, reg_info, intersect);
6532     if (!regd) {
6533         ath11k_warn(ab, "failed to build regd from reg_info\n");
6534         goto fallback;
6535     }
6536 
6537     spin_lock(&ab->base_lock);
6538     if (ab->default_regd[pdev_idx]) {
6539         /* The initial rules from FW after WMI Init are used to build
6540          * the default regd. From then on, any rules updated for
6541          * the pdev could be due to user reg changes.
6542          * Free the previously built regd before assigning the newly
6543          * generated regd to ar. NULL pointers are handled
6544          * by kfree itself.
6545          */
6546         ar = ab->pdevs[pdev_idx].ar;
6547         kfree(ab->new_regd[pdev_idx]);
6548         ab->new_regd[pdev_idx] = regd;
6549         queue_work(ab->workqueue, &ar->regd_update_work);
6550     } else {
6551         /* This regd will be applied during mac registration and is
6552          * held constant throughout for regd intersection purposes.
6553          */
6554         ab->default_regd[pdev_idx] = regd;
6555     }
6556     ab->dfs_region = reg_info->dfs_region;
6557     spin_unlock(&ab->base_lock);
6558 
6559     goto mem_free;
6560 
6561 fallback:
6562     /* Fall back to the older regd (by sending the previous country
6563      * setting again) if the fw has succeeded but we failed to process here.
6564      * The regdomain should be uniform across driver and fw. Since the
6565      * FW has processed the command and sent a success status, we expect
6566      * this function to succeed as well. If it doesn't, CTRY needs to be
6567      * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
6568      */
6569     /* TODO: This is rare, but still should also be handled */
6570     WARN_ON(1);
6571 mem_free:
6572     if (reg_info) {
6573         kfree(reg_info->reg_rules_2g_ptr);
6574         kfree(reg_info->reg_rules_5g_ptr);
6575         kfree(reg_info);
6576     }
6577     return ret;
6578 }
6579 
6580 static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
6581                     const void *ptr, void *data)
6582 {
6583     struct wmi_tlv_rdy_parse *rdy_parse = data;
6584     struct wmi_ready_event fixed_param;
6585     struct wmi_mac_addr *addr_list;
6586     struct ath11k_pdev *pdev;
6587     u32 num_mac_addr;
6588     int i;
6589 
6590     switch (tag) {
6591     case WMI_TAG_READY_EVENT:
6592         memset(&fixed_param, 0, sizeof(fixed_param));
6593         memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
6594                min_t(u16, sizeof(fixed_param), len));
6595         ab->wlan_init_status = fixed_param.ready_event_min.status;
6596         rdy_parse->num_extra_mac_addr =
6597             fixed_param.ready_event_min.num_extra_mac_addr;
6598 
6599         ether_addr_copy(ab->mac_addr,
6600                 fixed_param.ready_event_min.mac_addr.addr);
6601         ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
6602         ab->wmi_ready = true;
6603         break;
6604     case WMI_TAG_ARRAY_FIXED_STRUCT:
6605         addr_list = (struct wmi_mac_addr *)ptr;
6606         num_mac_addr = rdy_parse->num_extra_mac_addr;
6607 
6608         if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
6609             break;
6610 
6611         for (i = 0; i < ab->num_radios; i++) {
6612             pdev = &ab->pdevs[i];
6613             ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
6614         }
6615         ab->pdevs_macaddr_valid = true;
6616         break;
6617     default:
6618         break;
6619     }
6620 
6621     return 0;
6622 }
6623 
6624 static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
6625 {
6626     struct wmi_tlv_rdy_parse rdy_parse = { };
6627     int ret;
6628 
6629     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
6630                   ath11k_wmi_tlv_rdy_parse, &rdy_parse);
6631     if (ret) {
6632         ath11k_warn(ab, "failed to parse tlv %d\n", ret);
6633         return ret;
6634     }
6635 
6636     complete(&ab->wmi_ab.unified_ready);
6637     return 0;
6638 }
6639 
6640 static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
6641 {
6642     struct wmi_peer_delete_resp_event peer_del_resp;
6643     struct ath11k *ar;
6644 
6645     if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
6646         ath11k_warn(ab, "failed to extract peer delete resp");
6647         return;
6648     }
6649 
6650     rcu_read_lock();
6651     ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
6652     if (!ar) {
6653         ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
6654                 peer_del_resp.vdev_id);
6655         rcu_read_unlock();
6656         return;
6657     }
6658 
6659     complete(&ar->peer_delete_done);
6660     rcu_read_unlock();
6661     ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
6662            peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
6663 }
6664 
6665 static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
6666                       struct sk_buff *skb)
6667 {
6668     struct ath11k *ar;
6669     u32 vdev_id = 0;
6670 
6671     if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
6672         ath11k_warn(ab, "failed to extract vdev delete resp");
6673         return;
6674     }
6675 
6676     rcu_read_lock();
6677     ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
6678     if (!ar) {
6679         ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
6680                 vdev_id);
6681         rcu_read_unlock();
6682         return;
6683     }
6684 
6685     complete(&ar->vdev_delete_done);
6686 
6687     rcu_read_unlock();
6688 
6689     ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n",
6690            vdev_id);
6691 }
6692 
6693 static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
6694 {
6695     switch (vdev_resp_status) {
6696     case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
6697         return "invalid vdev id";
6698     case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
6699         return "not supported";
6700     case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
6701         return "dfs violation";
6702     case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
6703         return "invalid regdomain";
6704     default:
6705         return "unknown";
6706     }
6707 }
6708 
6709 static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
6710 {
6711     struct wmi_vdev_start_resp_event vdev_start_resp;
6712     struct ath11k *ar;
6713     u32 status;
6714 
6715     if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
6716         ath11k_warn(ab, "failed to extract vdev start resp");
6717         return;
6718     }
6719 
6720     rcu_read_lock();
6721     ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
6722     if (!ar) {
6723         ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
6724                 vdev_start_resp.vdev_id);
6725         rcu_read_unlock();
6726         return;
6727     }
6728 
6729     ar->last_wmi_vdev_start_status = 0;
6730 
6731     status = vdev_start_resp.status;
6732 
6733     if (WARN_ON_ONCE(status)) {
6734         ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
6735                 status, ath11k_wmi_vdev_resp_print(status));
6736         ar->last_wmi_vdev_start_status = status;
6737     }
6738 
6739     complete(&ar->vdev_setup_done);
6740 
6741     rcu_read_unlock();
6742 
6743     ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
6744            vdev_start_resp.vdev_id);
6745 }
6746 
6747 static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
6748 {
6749     struct ath11k_vif *arvif;
6750     u32 vdev_id, tx_status;
6751 
6752     if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
6753                      &vdev_id, &tx_status) != 0) {
6754         ath11k_warn(ab, "failed to extract bcn tx status");
6755         return;
6756     }
6757 
6758     rcu_read_lock();
6759     arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
6760     if (!arvif) {
6761         ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
6762                 vdev_id);
6763         rcu_read_unlock();
6764         return;
6765     }
6766     ath11k_mac_bcn_tx_event(arvif);
6767     rcu_read_unlock();
6768 }
6769 
6770 static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
6771 {
6772     struct ath11k *ar;
6773     u32 vdev_id = 0;
6774 
6775     if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
6776         ath11k_warn(ab, "failed to extract vdev stopped event");
6777         return;
6778     }
6779 
6780     rcu_read_lock();
6781     ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
6782     if (!ar) {
6783         ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
6784                 vdev_id);
6785         rcu_read_unlock();
6786         return;
6787     }
6788 
6789     complete(&ar->vdev_setup_done);
6790 
6791     rcu_read_unlock();
6792 
6793     ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
6794 }
6795 
6796 static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
6797 {
6798     struct mgmt_rx_event_params rx_ev = {0};
6799     struct ath11k *ar;
6800     struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
6801     struct ieee80211_hdr *hdr;
6802     u16 fc;
6803     struct ieee80211_supported_band *sband;
6804 
6805     if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
6806         ath11k_warn(ab, "failed to extract mgmt rx event");
6807         dev_kfree_skb(skb);
6808         return;
6809     }
6810 
6811     memset(status, 0, sizeof(*status));
6812 
6813     ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
6814            rx_ev.status);
6815 
6816     rcu_read_lock();
6817     ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
6818 
6819     if (!ar) {
6820         ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
6821                 rx_ev.pdev_id);
6822         dev_kfree_skb(skb);
6823         goto exit;
6824     }
6825 
6826     if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
6827         (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
6828         WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
6829         dev_kfree_skb(skb);
6830         goto exit;
6831     }
6832 
6833     if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
6834         status->flag |= RX_FLAG_MMIC_ERROR;
6835 
6836     if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
6837         rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
6838         status->band = NL80211_BAND_6GHZ;
6839         status->freq = rx_ev.chan_freq;
6840     } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
6841         status->band = NL80211_BAND_2GHZ;
6842     } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
6843         status->band = NL80211_BAND_5GHZ;
6844     } else {
6845         /* Shouldn't happen unless list of advertised channels to
6846          * mac80211 has been changed.
6847          */
6848         WARN_ON_ONCE(1);
6849         dev_kfree_skb(skb);
6850         goto exit;
6851     }
6852 
6853     if (rx_ev.phy_mode == MODE_11B &&
6854         (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
6855         ath11k_dbg(ab, ATH11K_DBG_WMI,
6856                "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
6857 
6858     sband = &ar->mac.sbands[status->band];
6859 
6860     if (status->band != NL80211_BAND_6GHZ)
6861         status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
6862                                   status->band);
6863 
6864     status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
6865     status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
6866 
6867     hdr = (struct ieee80211_hdr *)skb->data;
6868     fc = le16_to_cpu(hdr->frame_control);
6869 
6870     /* Firmware is guaranteed to report all essential management frames via
6871      * WMI, while it can deliver some extra via HTT. Since there can be
6872      * duplicates, split the reporting wrt monitor/sniffing.
6873      */
6874     status->flag |= RX_FLAG_SKIP_MONITOR;
6875 
6876     /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
6877      * Don't clear that. Also, FW delivers broadcast management frames
6878      * (e.g. group privacy action frames in mesh) as encrypted payload.
6879      */
6880     if (ieee80211_has_protected(hdr->frame_control) &&
6881         !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
6882         status->flag |= RX_FLAG_DECRYPTED;
6883 
6884         if (!ieee80211_is_robust_mgmt_frame(skb)) {
6885             status->flag |= RX_FLAG_IV_STRIPPED |
6886                     RX_FLAG_MMIC_STRIPPED;
6887             hdr->frame_control = __cpu_to_le16(fc &
6888                          ~IEEE80211_FCTL_PROTECTED);
6889         }
6890     }
6891 
6892     if (ieee80211_is_beacon(hdr->frame_control))
6893         ath11k_mac_handle_beacon(ar, skb);
6894 
6895     ath11k_dbg(ab, ATH11K_DBG_MGMT,
6896            "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
6897            skb, skb->len,
6898            fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
6899 
6900     ath11k_dbg(ab, ATH11K_DBG_MGMT,
6901            "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
6902            status->freq, status->band, status->signal,
6903            status->rate_idx);
6904 
6905     ieee80211_rx_ni(ar->hw, skb);
6906 
6907 exit:
6908     rcu_read_unlock();
6909 }
6910 
6911 static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
6912 {
6913     struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
6914     struct ath11k *ar;
6915 
6916     if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
6917         ath11k_warn(ab, "failed to extract mgmt tx compl event");
6918         return;
6919     }
6920 
6921     rcu_read_lock();
6922     ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
6923     if (!ar) {
6924         ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
6925                 tx_compl_param.pdev_id);
6926         goto exit;
6927     }
6928 
6929     wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
6930                  tx_compl_param.status);
6931 
6932     ath11k_dbg(ab, ATH11K_DBG_MGMT,
6933            "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
6934            tx_compl_param.pdev_id, tx_compl_param.desc_id,
6935            tx_compl_param.status);
6936 
6937 exit:
6938     rcu_read_unlock();
6939 }
6940 
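/* Walk the active pdevs and return the ar whose current scan state and scan
 * vdev id match; used when a cancelled scan's vdev is no longer active.
 */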
6941 static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
6942                           u32 vdev_id,
6943                           enum ath11k_scan_state state)
6944 {
6945     int i;
6946     struct ath11k_pdev *pdev;
6947     struct ath11k *ar;
6948 
6949     for (i = 0; i < ab->num_radios; i++) {
6950         pdev = rcu_dereference(ab->pdevs_active[i]);
6951         if (pdev && pdev->ar) {
6952             ar = pdev->ar;
6953 
6954             spin_lock_bh(&ar->data_lock);
6955             if (ar->scan.state == state &&
6956                 ar->scan.vdev_id == vdev_id) {
6957                 spin_unlock_bh(&ar->data_lock);
6958                 return ar;
6959             }
6960             spin_unlock_bh(&ar->data_lock);
6961         }
6962     }
6963     return NULL;
6964 }
6965 
6966 static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
6967 {
6968     struct ath11k *ar;
6969     struct wmi_scan_event scan_ev = {0};
6970 
6971     if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
6972         ath11k_warn(ab, "failed to extract scan event");
6973         return;
6974     }
6975 
6976     rcu_read_lock();
6977 
6978     /* In case the scan was cancelled, e.g. during interface teardown,
6979      * the interface will not be found among the active interfaces.
6980      * In such scenarios, iterate over the active pdevs to
6981      * find the 'ar' whose scan is ABORTING and whose
6982      * aborting scan's vdev id matches this event info.
6983      */
6984     if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
6985         scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
6986         ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
6987                          ATH11K_SCAN_ABORTING);
6988         if (!ar)
6989             ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
6990                              ATH11K_SCAN_RUNNING);
6991     } else {
6992         ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
6993     }
6994 
6995     if (!ar) {
6996         ath11k_warn(ab, "Received scan event for unknown vdev");
6997         rcu_read_unlock();
6998         return;
6999     }
7000 
7001     spin_lock_bh(&ar->data_lock);
7002 
7003     ath11k_dbg(ab, ATH11K_DBG_WMI,
7004            "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7005            ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
7006            scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
7007            scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
7008            ath11k_scan_state_str(ar->scan.state), ar->scan.state);
7009 
7010     switch (scan_ev.event_type) {
7011     case WMI_SCAN_EVENT_STARTED:
7012         ath11k_wmi_event_scan_started(ar);
7013         break;
7014     case WMI_SCAN_EVENT_COMPLETED:
7015         ath11k_wmi_event_scan_completed(ar);
7016         break;
7017     case WMI_SCAN_EVENT_BSS_CHANNEL:
7018         ath11k_wmi_event_scan_bss_chan(ar);
7019         break;
7020     case WMI_SCAN_EVENT_FOREIGN_CHAN:
7021         ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
7022         break;
7023     case WMI_SCAN_EVENT_START_FAILED:
7024         ath11k_warn(ab, "received scan start failure event\n");
7025         ath11k_wmi_event_scan_start_failed(ar);
7026         break;
7027     case WMI_SCAN_EVENT_DEQUEUED:
7028         __ath11k_mac_scan_finish(ar);
7029         break;
7030     case WMI_SCAN_EVENT_PREEMPTED:
7031     case WMI_SCAN_EVENT_RESTARTED:
7032     case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7033     default:
7034         break;
7035     }
7036 
7037     spin_unlock_bh(&ar->data_lock);
7038 
7039     rcu_read_unlock();
7040 }
7041 
7042 static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
7043 {
7044     struct wmi_peer_sta_kickout_arg arg = {};
7045     struct ieee80211_sta *sta;
7046     struct ath11k_peer *peer;
7047     struct ath11k *ar;
7048     u32 vdev_id;
7049 
7050     if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7051         ath11k_warn(ab, "failed to extract peer sta kickout event");
7052         return;
7053     }
7054 
7055     rcu_read_lock();
7056 
7057     spin_lock_bh(&ab->base_lock);
7058 
7059     peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
7060 
7061     if (!peer) {
7062         ath11k_warn(ab, "peer not found %pM\n",
7063                 arg.mac_addr);
7064         spin_unlock_bh(&ab->base_lock);
7065         goto exit;
7066     }
7067 
7068     vdev_id = peer->vdev_id;
7069 
7070     spin_unlock_bh(&ab->base_lock);
7071 
7072     ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
7073     if (!ar) {
7074         ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
7075                 peer->vdev_id);
7076         goto exit;
7077     }
7078 
7079     sta = ieee80211_find_sta_by_ifaddr(ar->hw,
7080                        arg.mac_addr, NULL);
7081     if (!sta) {
7082         ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
7083                 arg.mac_addr);
7084         goto exit;
7085     }
7086 
7087     ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
7088            arg.mac_addr);
7089 
7090     ieee80211_report_low_ack(sta, 10);
7091 
7092 exit:
7093     rcu_read_unlock();
7094 }
7095 
7096 static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
7097 {
7098     struct wmi_roam_event roam_ev = {};
7099     struct ath11k *ar;
7100 
7101     if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
7102         ath11k_warn(ab, "failed to extract roam event");
7103         return;
7104     }
7105 
7106     ath11k_dbg(ab, ATH11K_DBG_WMI,
7107            "wmi roam event vdev %u reason 0x%08x rssi %d\n",
7108            roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
7109 
7110     rcu_read_lock();
7111     ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
7112     if (!ar) {
7113         ath11k_warn(ab, "invalid vdev id in roam ev %d",
7114                 roam_ev.vdev_id);
7115         rcu_read_unlock();
7116         return;
7117     }
7118 
7119     if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
7120         ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
7121                 roam_ev.reason, roam_ev.vdev_id);
7122 
7123     switch (roam_ev.reason) {
7124     case WMI_ROAM_REASON_BEACON_MISS:
7125         ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
7126         break;
7127     case WMI_ROAM_REASON_BETTER_AP:
7128     case WMI_ROAM_REASON_LOW_RSSI:
7129     case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
7130     case WMI_ROAM_REASON_HO_FAILED:
7131         ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
7132                 roam_ev.reason, roam_ev.vdev_id);
7133         break;
7134     }
7135 
7136     rcu_read_unlock();
7137 }
7138 
7139 static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
7140 {
7141     struct wmi_chan_info_event ch_info_ev = {0};
7142     struct ath11k *ar;
7143     struct survey_info *survey;
7144     int idx;
7145     /* HW channel counters frequency value in hertz */
7146     u32 cc_freq_hz = ab->cc_freq_hz;
7147 
7148     if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
7149         ath11k_warn(ab, "failed to extract chan info event");
7150         return;
7151     }
7152 
7153     ath11k_dbg(ab, ATH11K_DBG_WMI,
7154            "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
7155            ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
7156            ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
7157            ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
7158            ch_info_ev.mac_clk_mhz);
7159 
7160     if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
7161         ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
7162         return;
7163     }
7164 
7165     rcu_read_lock();
7166     ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
7167     if (!ar) {
7168         ath11k_warn(ab, "invalid vdev id in chan info ev %d",
7169                 ch_info_ev.vdev_id);
7170         rcu_read_unlock();
7171         return;
7172     }
7173     spin_lock_bh(&ar->data_lock);
7174 
7175     switch (ar->scan.state) {
7176     case ATH11K_SCAN_IDLE:
7177     case ATH11K_SCAN_STARTING:
7178         ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
7179         goto exit;
7180     case ATH11K_SCAN_RUNNING:
7181     case ATH11K_SCAN_ABORTING:
7182         break;
7183     }
7184 
7185     idx = freq_to_idx(ar, ch_info_ev.freq);
7186     if (idx >= ARRAY_SIZE(ar->survey)) {
7187         ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7188                 ch_info_ev.freq, idx);
7189         goto exit;
7190     }
7191 
7192     /* If FW provides the MAC clock frequency in MHz, override the initialized
7193      * HW channel counters frequency value.
7194      */
7195     if (ch_info_ev.mac_clk_mhz)
7196         cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
7197 
7198     if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7199         survey = &ar->survey[idx];
7200         memset(survey, 0, sizeof(*survey));
7201         survey->noise = ch_info_ev.noise_floor;
7202         survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7203                  SURVEY_INFO_TIME_BUSY;
7204         survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
7205         survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
7206     }
7207 exit:
7208     spin_unlock_bh(&ar->data_lock);
7209     rcu_read_unlock();
7210 }
7211 
7212 static void
7213 ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
7214 {
7215     struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7216     struct survey_info *survey;
7217     struct ath11k *ar;
7218     u32 cc_freq_hz = ab->cc_freq_hz;
7219     u64 busy, total, tx, rx, rx_bss;
7220     int idx;
7221 
7222     if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7223         ath11k_warn(ab, "failed to extract pdev bss chan info event");
7224         return;
7225     }
7226 
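    /* Reassemble the 64-bit cycle counters from the high/low 32-bit halves
     * reported by firmware, e.g. busy = (rx_clear_count_high << 32) |
     * rx_clear_count_low.
     */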
7227     busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
7228             bss_ch_info_ev.rx_clear_count_low;
7229 
7230     total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
7231             bss_ch_info_ev.cycle_count_low;
7232 
7233     tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
7234             bss_ch_info_ev.tx_cycle_count_low;
7235 
7236     rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
7237             bss_ch_info_ev.rx_cycle_count_low;
7238 
7239     rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
7240             bss_ch_info_ev.rx_bss_cycle_count_low;
7241 
7242     ath11k_dbg(ab, ATH11K_DBG_WMI,
7243            "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7244            bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7245            bss_ch_info_ev.noise_floor, busy, total,
7246            tx, rx, rx_bss);
7247 
7248     rcu_read_lock();
7249     ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
7250 
7251     if (!ar) {
7252         ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7253                 bss_ch_info_ev.pdev_id);
7254         rcu_read_unlock();
7255         return;
7256     }
7257 
7258     spin_lock_bh(&ar->data_lock);
7259     idx = freq_to_idx(ar, bss_ch_info_ev.freq);
7260     if (idx >= ARRAY_SIZE(ar->survey)) {
7261         ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7262                 bss_ch_info_ev.freq, idx);
7263         goto exit;
7264     }
7265 
7266     survey = &ar->survey[idx];
7267 
7268     survey->noise     = bss_ch_info_ev.noise_floor;
7269     survey->time      = div_u64(total, cc_freq_hz);
7270     survey->time_busy = div_u64(busy, cc_freq_hz);
7271     survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
7272     survey->time_tx   = div_u64(tx, cc_freq_hz);
7273     survey->filled   |= (SURVEY_INFO_NOISE_DBM |
7274                  SURVEY_INFO_TIME |
7275                  SURVEY_INFO_TIME_BUSY |
7276                  SURVEY_INFO_TIME_RX |
7277                  SURVEY_INFO_TIME_TX);
7278 exit:
7279     spin_unlock_bh(&ar->data_lock);
7280     complete(&ar->bss_survey_done);
7281 
7282     rcu_read_unlock();
7283 }
7284 
7285 static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
7286                         struct sk_buff *skb)
7287 {
7288     struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
7289     struct ath11k *ar;
7290 
7291     if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
7292         ath11k_warn(ab, "failed to extract install key compl event");
7293         return;
7294     }
7295 
7296     ath11k_dbg(ab, ATH11K_DBG_WMI,
7297            "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
7298            install_key_compl.key_idx, install_key_compl.key_flags,
7299            install_key_compl.macaddr, install_key_compl.status);
7300 
7301     rcu_read_lock();
7302     ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
7303     if (!ar) {
7304         ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
7305                 install_key_compl.vdev_id);
7306         rcu_read_unlock();
7307         return;
7308     }
7309 
7310     ar->install_key_status = 0;
7311 
7312     if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
7313         ath11k_warn(ab, "install key failed for %pM status %d\n",
7314                 install_key_compl.macaddr, install_key_compl.status);
7315         ar->install_key_status = install_key_compl.status;
7316     }
7317 
7318     complete(&ar->install_key_done);
7319     rcu_read_unlock();
7320 }
7321 
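/* Unpack the WMI service bitmaps: each 32-bit word carries
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 consecutive service ids, so the inner
 * do/while walks one word while j tracks the absolute service number
 * (starting at WMI_MAX_SERVICE for ext and WMI_MAX_EXT_SERVICE for ext2).
 */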
7322 static int  ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
7323                        u16 tag, u16 len,
7324                        const void *ptr, void *data)
7325 {
7326     const struct wmi_service_available_event *ev;
7327     u32 *wmi_ext2_service_bitmap;
7328     int i, j;
7329 
7330     switch (tag) {
7331     case WMI_TAG_SERVICE_AVAILABLE_EVENT:
7332         ev = (struct wmi_service_available_event *)ptr;
7333         for (i = 0, j = WMI_MAX_SERVICE;
7334             i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
7335             i++) {
7336             do {
7337                 if (ev->wmi_service_segment_bitmap[i] &
7338                     BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7339                     set_bit(j, ab->wmi_ab.svc_map);
7340             } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7341         }
7342 
7343         ath11k_dbg(ab, ATH11K_DBG_WMI,
7344                "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
7345                ev->wmi_service_segment_bitmap[0],
7346                ev->wmi_service_segment_bitmap[1],
7347                ev->wmi_service_segment_bitmap[2],
7348                ev->wmi_service_segment_bitmap[3]);
7349         break;
7350     case WMI_TAG_ARRAY_UINT32:
7351         wmi_ext2_service_bitmap = (u32 *)ptr;
7352         for (i = 0, j = WMI_MAX_EXT_SERVICE;
7353             i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
7354             i++) {
7355             do {
7356                 if (wmi_ext2_service_bitmap[i] &
7357                     BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
7358                     set_bit(j, ab->wmi_ab.svc_map);
7359             } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
7360         }
7361 
7362         ath11k_dbg(ab, ATH11K_DBG_WMI,
7363                "wmi_ext2_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
7364                wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
7365                wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
7366         break;
7367     }
7368     return 0;
7369 }
7370 
7371 static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
7372 {
7373     int ret;
7374 
7375     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
7376                   ath11k_wmi_tlv_services_parser,
7377                   NULL);
7378     if (ret)
7379         ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
7380 }
7381 
7382 static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
7383 {
7384     struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
7385     struct ath11k *ar;
7386 
7387     if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7388         ath11k_warn(ab, "failed to extract peer assoc conf event");
7389         return;
7390     }
7391 
7392     ath11k_dbg(ab, ATH11K_DBG_WMI,
7393            "peer assoc conf ev vdev id %d macaddr %pM\n",
7394            peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
7395 
7396     rcu_read_lock();
7397     ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
7398 
7399     if (!ar) {
7400         ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
7401                 peer_assoc_conf.vdev_id);
7402         rcu_read_unlock();
7403         return;
7404     }
7405 
7406     complete(&ar->peer_assoc_done);
7407     rcu_read_unlock();
7408 }
7409 
7410 static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
7411 {
7412     ath11k_debugfs_fw_stats_process(ab, skb);
7413 }
7414 
7415 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the scanned frequency
7416  * is not part of the BDF CTL (Conformance Test Limits) table entries.
7417  */
7418 static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
7419                          struct sk_buff *skb)
7420 {
7421     const void **tb;
7422     const struct wmi_pdev_ctl_failsafe_chk_event *ev;
7423     int ret;
7424 
7425     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7426     if (IS_ERR(tb)) {
7427         ret = PTR_ERR(tb);
7428         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
7429         return;
7430     }
7431 
7432     ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
7433     if (!ev) {
7434         ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
7435         kfree(tb);
7436         return;
7437     }
7438 
7439     ath11k_dbg(ab, ATH11K_DBG_WMI,
7440            "pdev ctl failsafe check ev status %d\n",
7441            ev->ctl_failsafe_status);
7442 
7443     /* If ctl_failsafe_status is set to 1, FW will cap the transmit power
7444      * at 10 dBm; otherwise the CTL power entry in the BDF is picked up.
7445      */
7446     if (ev->ctl_failsafe_status != 0)
7447         ath11k_warn(ab, "pdev ctl failsafe failure status %d",
7448                 ev->ctl_failsafe_status);
7449 
7450     kfree(tb);
7451 }
7452 
7453 static void
7454 ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
7455                       const struct wmi_pdev_csa_switch_ev *ev,
7456                       const u32 *vdev_ids)
7457 {
7458     int i;
7459     struct ath11k_vif *arvif;
7460 
7461     /* Finish CSA once the switch count reaches zero */
7462     if (ev->current_switch_count)
7463         return;
7464 
7465     rcu_read_lock();
7466     for (i = 0; i < ev->num_vdevs; i++) {
7467         arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
7468 
7469         if (!arvif) {
7470             ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
7471                     vdev_ids[i]);
7472             continue;
7473         }
7474 
7475         if (arvif->is_up && arvif->vif->bss_conf.csa_active)
7476             ieee80211_csa_finish(arvif->vif);
7477     }
7478     rcu_read_unlock();
7479 }
7480 
7481 static void
7482 ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
7483                           struct sk_buff *skb)
7484 {
7485     const void **tb;
7486     const struct wmi_pdev_csa_switch_ev *ev;
7487     const u32 *vdev_ids;
7488     int ret;
7489 
7490     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7491     if (IS_ERR(tb)) {
7492         ret = PTR_ERR(tb);
7493         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
7494         return;
7495     }
7496 
7497     ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
7498     vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
7499 
7500     if (!ev || !vdev_ids) {
7501         ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
7502         kfree(tb);
7503         return;
7504     }
7505 
7506     ath11k_dbg(ab, ATH11K_DBG_WMI,
7507            "pdev csa switch count %d for pdev %d, num_vdevs %d",
7508            ev->current_switch_count, ev->pdev_id,
7509            ev->num_vdevs);
7510 
7511     ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
7512 
7513     kfree(tb);
7514 }
7515 
7516 static void
7517 ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
7518 {
7519     const void **tb;
7520     const struct wmi_pdev_radar_ev *ev;
7521     struct ath11k *ar;
7522     int ret;
7523 
7524     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7525     if (IS_ERR(tb)) {
7526         ret = PTR_ERR(tb);
7527         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
7528         return;
7529     }
7530 
7531     ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
7532 
7533     if (!ev) {
7534         ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
7535         kfree(tb);
7536         return;
7537     }
7538 
7539     ath11k_dbg(ab, ATH11K_DBG_WMI,
7540            "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
7541            ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
7542            ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
7543            ev->freq_offset, ev->sidx);
7544 
7545     ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
7546 
7547     if (!ar) {
7548         ath11k_warn(ab, "radar detected in invalid pdev %d\n",
7549                 ev->pdev_id);
7550         goto exit;
7551     }
7552 
7553     ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
7554            ev->pdev_id);
7555 
7556     if (ar->dfs_block_radar_events)
7557         ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
7558     else
7559         ieee80211_radar_detected(ar->hw);
7560 
7561 exit:
7562     kfree(tb);
7563 }
7564 
7565 static void
7566 ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
7567                   struct sk_buff *skb)
7568 {
7569     struct ath11k *ar;
7570     const void **tb;
7571     const struct wmi_pdev_temperature_event *ev;
7572     int ret;
7573 
7574     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7575     if (IS_ERR(tb)) {
7576         ret = PTR_ERR(tb);
7577         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
7578         return;
7579     }
7580 
7581     ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
7582     if (!ev) {
7583         ath11k_warn(ab, "failed to fetch pdev temp ev");
7584         kfree(tb);
7585         return;
7586     }
7587 
7588     ath11k_dbg(ab, ATH11K_DBG_WMI,
7589            "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
7590 
7591     ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
7592     if (!ar) {
7593         ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
7594         kfree(tb);
7595         return;
7596     }
7597 
7598     ath11k_thermal_event_temperature(ar, ev->temp);
7599 
7600     kfree(tb);
7601 }
7602 
7603 static void ath11k_fils_discovery_event(struct ath11k_base *ab,
7604                     struct sk_buff *skb)
7605 {
7606     const void **tb;
7607     const struct wmi_fils_discovery_event *ev;
7608     int ret;
7609 
7610     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7611     if (IS_ERR(tb)) {
7612         ret = PTR_ERR(tb);
7613         ath11k_warn(ab,
7614                 "failed to parse FILS discovery event tlv %d\n",
7615                 ret);
7616         return;
7617     }
7618 
7619     ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
7620     if (!ev) {
7621         ath11k_warn(ab, "failed to fetch FILS discovery event\n");
7622         kfree(tb);
7623         return;
7624     }
7625 
7626     ath11k_warn(ab,
7627             "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
7628             ev->vdev_id, ev->fils_tt, ev->tbtt);
7629 
7630     kfree(tb);
7631 }
7632 
7633 static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
7634                           struct sk_buff *skb)
7635 {
7636     const void **tb;
7637     const struct wmi_probe_resp_tx_status_event *ev;
7638     int ret;
7639 
7640     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7641     if (IS_ERR(tb)) {
7642         ret = PTR_ERR(tb);
7643         ath11k_warn(ab,
7644                 "failed to parse probe response transmission status event tlv: %d\n",
7645                 ret);
7646         return;
7647     }
7648 
7649     ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
7650     if (!ev) {
7651         ath11k_warn(ab,
7652                 "failed to fetch probe response transmission status event\n");
7653         kfree(tb);
7654         return;
7655     }
7656 
7657     if (ev->tx_status)
7658         ath11k_warn(ab,
7659                 "Probe response transmission failed for vdev_id %u, status %u\n",
7660                 ev->vdev_id, ev->tx_status);
7661 
7662     kfree(tb);
7663 }
7664 
7665 static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
7666                         u16 tag, u16 len,
7667                         const void *ptr, void *data)
7668 {
7669     struct wmi_wow_ev_arg *ev = data;
7670     const char *wow_pg_fault;
7671     int wow_pg_len;
7672 
7673     switch (tag) {
7674     case WMI_TAG_WOW_EVENT_INFO:
7675         memcpy(ev, ptr, sizeof(*ev));
7676         ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
7677                ev->wake_reason, wow_reason(ev->wake_reason));
7678         break;
7679 
7680     case WMI_TAG_ARRAY_BYTE:
7681         if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
7682             wow_pg_fault = ptr;
7683             /* the first 4 bytes are the length of the page fault data */
7684             wow_pg_len = *(int *)wow_pg_fault;
7685             wow_pg_fault += sizeof(int);
7686             ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
7687                    wow_pg_len);
7688             ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
7689                     "wow_event_info_type packet present",
7690                     "wow_pg_fault ",
7691                     wow_pg_fault,
7692                     wow_pg_len);
7693         }
7694         break;
7695     default:
7696         break;
7697     }
7698 
7699     return 0;
7700 }
7701 
7702 static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
7703 {
7704     struct wmi_wow_ev_arg ev = { };
7705     int ret;
7706 
7707     ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
7708                   ath11k_wmi_tlv_wow_wakeup_host_parse,
7709                   &ev);
7710     if (ret) {
7711         ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
7712         return;
7713     }
7714 
7715     complete(&ab->wow.wakeup_completed);
7716 }
7717 
7718 static void
7719 ath11k_wmi_diag_event(struct ath11k_base *ab,
7720               struct sk_buff *skb)
7721 {
7722     trace_ath11k_wmi_diag(ab, skb->data, skb->len);
7723 }
7724 
7725 static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
7726 {
7727     switch (status) {
7728     case WMI_ADD_TWT_STATUS_OK:
7729         return "ok";
7730     case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
7731         return "twt disabled";
7732     case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
7733         return "dialog id in use";
7734     case WMI_ADD_TWT_STATUS_INVALID_PARAM:
7735         return "invalid parameters";
7736     case WMI_ADD_TWT_STATUS_NOT_READY:
7737         return "not ready";
7738     case WMI_ADD_TWT_STATUS_NO_RESOURCE:
7739         return "resource unavailable";
7740     case WMI_ADD_TWT_STATUS_NO_ACK:
7741         return "no ack";
7742     case WMI_ADD_TWT_STATUS_NO_RESPONSE:
7743         return "no response";
7744     case WMI_ADD_TWT_STATUS_DENIED:
7745         return "denied";
7746     case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
7747         fallthrough;
7748     default:
7749         return "unknown error";
7750     }
7751 }
7752 
7753 static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
7754                         struct sk_buff *skb)
7755 {
7756     const void **tb;
7757     const struct wmi_twt_add_dialog_event *ev;
7758     int ret;
7759 
7760     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7761     if (IS_ERR(tb)) {
7762         ret = PTR_ERR(tb);
7763         ath11k_warn(ab,
7764                 "failed to parse wmi twt add dialog status event tlv: %d\n",
7765                 ret);
7766         return;
7767     }
7768 
7769     ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
7770     if (!ev) {
7771         ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
7772         goto exit;
7773     }
7774 
7775     if (ev->status)
7776         ath11k_warn(ab,
7777                 "wmi add twt dialog event vdev %d dialog id %d status %s\n",
7778                 ev->vdev_id, ev->dialog_id,
7779                 ath11k_wmi_twt_add_dialog_event_status(ev->status));
7780 
7781 exit:
7782     kfree(tb);
7783 }
7784 
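     /* GTK rekey status from firmware: rebuild the 64-bit replay counter from
      * the two 32-bit words in the event, cache it for the rekey offload and
      * forward it to mac80211 in the big-endian form the supplicant expects.
      */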
7785 static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
7786                         struct sk_buff *skb)
7787 {
7788     const void **tb;
7789     const struct wmi_gtk_offload_status_event *ev;
7790     struct ath11k_vif *arvif;
7791     __be64 replay_ctr_be;
7792     u64    replay_ctr;
7793     int ret;
7794 
7795     tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
7796     if (IS_ERR(tb)) {
7797         ret = PTR_ERR(tb);
7798         ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
7799         return;
7800     }
7801 
7802     ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
7803     if (!ev) {
7804         ath11k_warn(ab, "failed to fetch gtk offload status ev\n");
7805         kfree(tb);
7806         return;
7807     }
7808 
7809     arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
7810     if (!arvif) {
7811         ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
7812                 ev->vdev_id);
7813         kfree(tb);
7814         return;
7815     }
7816 
7817     ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
7818            ev->refresh_cnt);
7819     ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
7820             NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
7821 
7822     replay_ctr =  ev->replay_ctr.word1;
7823     replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
7824     arvif->rekey_data.replay_ctr = replay_ctr;
7825 
7826     /* supplicant expects big-endian replay counter */
7827     replay_ctr_be = cpu_to_be64(replay_ctr);
7828 
7829     ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
7830                    (void *)&replay_ctr_be, GFP_ATOMIC);
7831 
7832     kfree(tb);
7833 }
7834 
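     /* HTC rx completion handler for the WMI control endpoint: strip the WMI
      * command header and dispatch the event to the matching handler based on
      * the command id. Every path frees the skb here, except
      * WMI_MGMT_RX_EVENTID, whose handler takes ownership of the skb.
      */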
7835 static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
7836 {
7837     struct wmi_cmd_hdr *cmd_hdr;
7838     enum wmi_tlv_event_id id;
7839 
7840     cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
7841     id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
7842 
7843     trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
7844 
7845     if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
7846         goto out;
7847 
7848     switch (id) {
7849         /* Process all the WMI events here */
7850     case WMI_SERVICE_READY_EVENTID:
7851         ath11k_service_ready_event(ab, skb);
7852         break;
7853     case WMI_SERVICE_READY_EXT_EVENTID:
7854         ath11k_service_ready_ext_event(ab, skb);
7855         break;
7856     case WMI_SERVICE_READY_EXT2_EVENTID:
7857         ath11k_service_ready_ext2_event(ab, skb);
7858         break;
7859     case WMI_REG_CHAN_LIST_CC_EVENTID:
7860         ath11k_reg_chan_list_event(ab, skb);
7861         break;
7862     case WMI_READY_EVENTID:
7863         ath11k_ready_event(ab, skb);
7864         break;
7865     case WMI_PEER_DELETE_RESP_EVENTID:
7866         ath11k_peer_delete_resp_event(ab, skb);
7867         break;
7868     case WMI_VDEV_START_RESP_EVENTID:
7869         ath11k_vdev_start_resp_event(ab, skb);
7870         break;
7871     case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
7872         ath11k_bcn_tx_status_event(ab, skb);
7873         break;
7874     case WMI_VDEV_STOPPED_EVENTID:
7875         ath11k_vdev_stopped_event(ab, skb);
7876         break;
7877     case WMI_MGMT_RX_EVENTID:
7878         ath11k_mgmt_rx_event(ab, skb);
7879         /* mgmt_rx_event() owns the skb now! */
7880         return;
7881     case WMI_MGMT_TX_COMPLETION_EVENTID:
7882         ath11k_mgmt_tx_compl_event(ab, skb);
7883         break;
7884     case WMI_SCAN_EVENTID:
7885         ath11k_scan_event(ab, skb);
7886         break;
7887     case WMI_PEER_STA_KICKOUT_EVENTID:
7888         ath11k_peer_sta_kickout_event(ab, skb);
7889         break;
7890     case WMI_ROAM_EVENTID:
7891         ath11k_roam_event(ab, skb);
7892         break;
7893     case WMI_CHAN_INFO_EVENTID:
7894         ath11k_chan_info_event(ab, skb);
7895         break;
7896     case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
7897         ath11k_pdev_bss_chan_info_event(ab, skb);
7898         break;
7899     case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
7900         ath11k_vdev_install_key_compl_event(ab, skb);
7901         break;
7902     case WMI_SERVICE_AVAILABLE_EVENTID:
7903         ath11k_service_available_event(ab, skb);
7904         break;
7905     case WMI_PEER_ASSOC_CONF_EVENTID:
7906         ath11k_peer_assoc_conf_event(ab, skb);
7907         break;
7908     case WMI_UPDATE_STATS_EVENTID:
7909         ath11k_update_stats_event(ab, skb);
7910         break;
7911     case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
7912         ath11k_pdev_ctl_failsafe_check_event(ab, skb);
7913         break;
7914     case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
7915         ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
7916         break;
7917     case WMI_PDEV_TEMPERATURE_EVENTID:
7918         ath11k_wmi_pdev_temperature_event(ab, skb);
7919         break;
7920     case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
7921         ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
7922         break;
7923     case WMI_HOST_FILS_DISCOVERY_EVENTID:
7924         ath11k_fils_discovery_event(ab, skb);
7925         break;
7926     case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
7927         ath11k_probe_resp_tx_status_event(ab, skb);
7928         break;
7929     case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
7930         ath11k_wmi_obss_color_collision_event(ab, skb);
7931         break;
7932     case WMI_TWT_ADD_DIALOG_EVENTID:
7933         ath11k_wmi_twt_add_dialog_event(ab, skb);
7934         break;
7935     /* add Unsupported events here */
7936     case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
7937     case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
7938     case WMI_TWT_ENABLE_EVENTID:
7939     case WMI_TWT_DISABLE_EVENTID:
7940     case WMI_TWT_DEL_DIALOG_EVENTID:
7941     case WMI_TWT_PAUSE_DIALOG_EVENTID:
7942     case WMI_TWT_RESUME_DIALOG_EVENTID:
7943     case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
7944     case WMI_PEER_CREATE_CONF_EVENTID:
7945         ath11k_dbg(ab, ATH11K_DBG_WMI,
7946                "ignoring unsupported event 0x%x\n", id);
7947         break;
7948     case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
7949         ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
7950         break;
7951     case WMI_VDEV_DELETE_RESP_EVENTID:
7952         ath11k_vdev_delete_resp_event(ab, skb);
7953         break;
7954     case WMI_WOW_WAKEUP_HOST_EVENTID:
7955         ath11k_wmi_event_wow_wakeup_host(ab, skb);
7956         break;
7957     case WMI_11D_NEW_COUNTRY_EVENTID:
7958         ath11k_reg_11d_new_cc_event(ab, skb);
7959         break;
7960     case WMI_DIAG_EVENTID:
7961         ath11k_wmi_diag_event(ab, skb);
7962         break;
7963     case WMI_GTK_OFFLOAD_STATUS_EVENTID:
7964         ath11k_wmi_gtk_offload_status_event(ab, skb);
7965         break;
7966     /* TODO: Add remaining events */
7967     default:
7968         ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
7969         break;
7970     }
7971 
7972 out:
7973     dev_kfree_skb(skb);
7974 }
7975 
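     /* Connect the per-pdev WMI control service (MAC0/MAC1/MAC2, selected by
      * pdev_idx) to HTC and record the endpoint id and maximum message length
      * negotiated for that endpoint.
      */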
7976 static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
7977                        u32 pdev_idx)
7978 {
7979     int status;
7980     u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
7981              ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
7982              ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
7983 
7984     struct ath11k_htc_svc_conn_req conn_req;
7985     struct ath11k_htc_svc_conn_resp conn_resp;
7986 
7987     memset(&conn_req, 0, sizeof(conn_req));
7988     memset(&conn_resp, 0, sizeof(conn_resp));
7989 
7990     /* these fields are the same for all service endpoints */
7991     conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
7992     conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
7993     conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
7994 
7995     /* connect to control service */
7996     conn_req.service_id = svc_id[pdev_idx];
7997 
7998     status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
7999     if (status) {
8000         ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
8001                 status);
8002         return status;
8003     }
8004 
8005     ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
8006     ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
8007     ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
8008     init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
8009 
8010     return 0;
8011 }
8012 
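     /* Build and send a WMI_UNIT_TEST_CMDID command: the fixed unit test
      * parameters are followed by a UINT32 TLV array carrying the test
      * arguments.
      */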
8013 static int
8014 ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
8015                   struct wmi_unit_test_cmd ut_cmd,
8016                   u32 *test_args)
8017 {
8018     struct ath11k_pdev_wmi *wmi = ar->wmi;
8019     struct wmi_unit_test_cmd *cmd;
8020     struct sk_buff *skb;
8021     struct wmi_tlv *tlv;
8022     void *ptr;
8023     u32 *ut_cmd_args;
8024     int buf_len, arg_len;
8025     int ret;
8026     int i;
8027 
8028     arg_len = sizeof(u32) * ut_cmd.num_args;
8029     buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
8030 
8031     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
8032     if (!skb)
8033         return -ENOMEM;
8034 
8035     cmd = (struct wmi_unit_test_cmd *)skb->data;
8036     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
8037               FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
8038 
8039     cmd->vdev_id = ut_cmd.vdev_id;
8040     cmd->module_id = ut_cmd.module_id;
8041     cmd->num_args = ut_cmd.num_args;
8042     cmd->diag_token = ut_cmd.diag_token;
8043 
8044     ptr = skb->data + sizeof(ut_cmd);
8045 
8046     tlv = ptr;
8047     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
8048               FIELD_PREP(WMI_TLV_LEN, arg_len);
8049 
8050     ptr += TLV_HDR_SIZE;
8051 
8052     ut_cmd_args = ptr;
8053     for (i = 0; i < ut_cmd.num_args; i++)
8054         ut_cmd_args[i] = test_args[i];
8055 
8056     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
8057 
8058     if (ret) {
8059         ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
8060                 ret);
8061         dev_kfree_skb(skb);
8062     }
8063 
8064     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
8065            "WMI unit test : module %d vdev %d n_args %d token %d\n",
8066            ut_cmd.module_id, ut_cmd.vdev_id, ut_cmd.num_args,
8067            ut_cmd.diag_token);
8068 
8069     return ret;
8070 }
8071 
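     /* Trigger a simulated radar detection in firmware via the DFS unit test
      * module. Requires at least one started AP vdev on this radio.
      */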
8072 int ath11k_wmi_simulate_radar(struct ath11k *ar)
8073 {
8074     struct ath11k_vif *arvif;
8075     u32 dfs_args[DFS_MAX_TEST_ARGS];
8076     struct wmi_unit_test_cmd wmi_ut;
8077     bool arvif_found = false;
8078 
8079     list_for_each_entry(arvif, &ar->arvifs, list) {
8080         if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
8081             arvif_found = true;
8082             break;
8083         }
8084     }
8085 
8086     if (!arvif_found)
8087         return -EINVAL;
8088 
8089     dfs_args[DFS_TEST_CMDID] = 0;
8090     dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
8091     /* Currently we can pass segment id (b0 - b1), chirp (b2) and
8092      * freq offset (b3 - b10) to the unit test. For simulation
8093      * purposes these can be set to 0, which is valid.
8094      */
8095     dfs_args[DFS_TEST_RADAR_PARAM] = 0;
8096 
8097     wmi_ut.vdev_id = arvif->vdev_id;
8098     wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
8099     wmi_ut.num_args = DFS_MAX_TEST_ARGS;
8100     wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
8101 
8102     ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
8103 
8104     return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
8105 }
8106 
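     /* Configure firmware debug logging: the fixed param TLV selects the
      * dbglog parameter and value, and the UINT32 array TLV carries the module
      * id bitmap used by the *_MOD_ENABLE_BITMAP parameters.
      */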
8107 int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
8108                  struct ath11k_fw_dbglog *dbglog)
8109 {
8110     struct ath11k_pdev_wmi *wmi = ar->wmi;
8111     struct wmi_debug_log_config_cmd_fixed_param *cmd;
8112     struct sk_buff *skb;
8113     struct wmi_tlv *tlv;
8114     int ret, len;
8115 
8116     len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
8117     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
8118     if (!skb)
8119         return -ENOMEM;
8120 
8121     cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
8122     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
8123               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8124     cmd->dbg_log_param = dbglog->param;
8125 
8126     tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
8127     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
8128               FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
8129 
8130     switch (dbglog->param) {
8131     case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
8132     case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
8133     case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
8134     case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
8135         cmd->value = dbglog->value;
8136         break;
8137     case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
8138     case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
8139         cmd->value = dbglog->value;
8140         memcpy(tlv->value, module_id_bitmap,
8141                MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
8142         /* clear the current config so the next user config starts from scratch */
8143         memset(module_id_bitmap, 0,
8144                MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
8145         break;
8146     default:
8147         dev_kfree_skb(skb);
8148         return -EINVAL;
8149     }
8150 
8151     ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
8152     if (ret) {
8153         ath11k_warn(ar->ab,
8154                 "failed to send WMI_DBGLOG_CFG_CMDID: %d\n", ret);
8155         dev_kfree_skb(skb);
8156     }
8157     return ret;
8158 }
8159 
8160 int ath11k_wmi_connect(struct ath11k_base *ab)
8161 {
8162     u32 i;
8163     u8 wmi_ep_count;
8164 
8165     wmi_ep_count = ab->htc.wmi_ep_count;
8166     if (wmi_ep_count > ab->hw_params.max_radios)
8167         return -1;
8168 
8169     for (i = 0; i < wmi_ep_count; i++)
8170         ath11k_connect_pdev_htc_service(ab, i);
8171 
8172     return 0;
8173 }
8174 
8175 static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
8176 {
8177     if (WARN_ON(pdev_id >= MAX_RADIOS))
8178         return;
8179 
8180     /* TODO: Deinit any pdev specific wmi resource */
8181 }
8182 
8183 int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
8184                u8 pdev_id)
8185 {
8186     struct ath11k_pdev_wmi *wmi_handle;
8187 
8188     if (pdev_id >= ab->hw_params.max_radios)
8189         return -EINVAL;
8190 
8191     wmi_handle = &ab->wmi_ab.wmi[pdev_id];
8192 
8193     wmi_handle->wmi_ab = &ab->wmi_ab;
8194 
8195     ab->wmi_ab.ab = ab;
8196     /* TODO: Init remaining resource specific to pdev */
8197 
8198     return 0;
8199 }
8200 
8201 int ath11k_wmi_attach(struct ath11k_base *ab)
8202 {
8203     int ret;
8204 
8205     ret = ath11k_wmi_pdev_attach(ab, 0);
8206     if (ret)
8207         return ret;
8208 
8209     ab->wmi_ab.ab = ab;
8210     ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
8211 
8212     /* overwritten when the service ready ext event is handled */
8213     if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
8214         ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
8215 
8216     /* TODO: Init remaining wmi soc resources required */
8217     init_completion(&ab->wmi_ab.service_ready);
8218     init_completion(&ab->wmi_ab.unified_ready);
8219 
8220     return 0;
8221 }
8222 
8223 void ath11k_wmi_detach(struct ath11k_base *ab)
8224 {
8225     int i;
8226 
8227     /* TODO: Deinit wmi resource specific to SOC as required */
8228 
8229     for (i = 0; i < ab->htc.wmi_ep_count; i++)
8230         ath11k_wmi_pdev_detach(ab, i);
8231 
8232     ath11k_wmi_free_dbring_caps(ab);
8233 }
8234 
8235 int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
8236                   u32 filter_bitmap, bool enable)
8237 {
8238     struct wmi_hw_data_filter_cmd *cmd;
8239     struct sk_buff *skb;
8240     int len;
8241 
8242     len = sizeof(*cmd);
8243     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8244 
8245     if (!skb)
8246         return -ENOMEM;
8247 
8248     cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
8249     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
8250               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8251 
8252     cmd->vdev_id = vdev_id;
8253     cmd->enable = enable;
8254 
8255     /* Set all modes in case of disable */
8256     if (cmd->enable)
8257         cmd->hw_filter_bitmap = filter_bitmap;
8258     else
8259         cmd->hw_filter_bitmap = ((u32)~0U);
8260 
8261     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
8262            "wmi hw data filter enable %d filter_bitmap 0x%x\n",
8263            enable, filter_bitmap);
8264 
8265     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
8266 }
8267 
8268 int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
8269 {
8270     struct wmi_wow_host_wakeup_ind *cmd;
8271     struct sk_buff *skb;
8272     size_t len;
8273 
8274     len = sizeof(*cmd);
8275     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8276     if (!skb)
8277         return -ENOMEM;
8278 
8279     cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
8280     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8281                      WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
8282               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8283 
8284     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
8285 
8286     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
8287 }
8288 
8289 int ath11k_wmi_wow_enable(struct ath11k *ar)
8290 {
8291     struct wmi_wow_enable_cmd *cmd;
8292     struct sk_buff *skb;
8293     int len;
8294 
8295     len = sizeof(*cmd);
8296     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8297     if (!skb)
8298         return -ENOMEM;
8299 
8300     cmd = (struct wmi_wow_enable_cmd *)skb->data;
8301     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
8302               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8303 
8304     cmd->enable = 1;
8305     cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
8306     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow enable\n");
8307 
8308     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
8309 }
8310 
8311 int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
8312                  const u8 mac_addr[ETH_ALEN])
8313 {
8314     struct sk_buff *skb;
8315     struct wmi_scan_prob_req_oui_cmd *cmd;
8316     u32 prob_req_oui;
8317     int len;
8318 
8319     prob_req_oui = (((u32)mac_addr[0]) << 16) |
8320                (((u32)mac_addr[1]) << 8) | mac_addr[2];
8321 
8322     len = sizeof(*cmd);
8323     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8324     if (!skb)
8325         return -ENOMEM;
8326 
8327     cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
8328     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8329                      WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
8330               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8331     cmd->prob_req_oui = prob_req_oui;
8332 
8333     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n",
8334            prob_req_oui);
8335 
8336     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
8337 }
8338 
8339 int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
8340                     enum wmi_wow_wakeup_event event,
8341                 u32 enable)
8342 {
8343     struct wmi_wow_add_del_event_cmd *cmd;
8344     struct sk_buff *skb;
8345     size_t len;
8346 
8347     len = sizeof(*cmd);
8348     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8349     if (!skb)
8350         return -ENOMEM;
8351 
8352     cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
8353     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
8354               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8355 
8356     cmd->vdev_id = vdev_id;
8357     cmd->is_add = enable;
8358     cmd->event_bitmap = (1 << event);
8359 
8360     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
8361            wow_wakeup_event(event), enable, vdev_id);
8362 
8363     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
8364 }
8365 
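     /* Install a WoW bitmap wakeup pattern. Besides the bitmap pattern itself,
      * the command carries empty placeholder TLVs (ipv4/ipv6 sync, magic
      * pattern, pattern info timeout) plus a ratelimit interval TLV, matching
      * the layout accounted for in the length calculation below.
      */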
8366 int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
8367                    const u8 *pattern, const u8 *mask,
8368                int pattern_len, int pattern_offset)
8369 {
8370     struct wmi_wow_add_pattern_cmd *cmd;
8371     struct wmi_wow_bitmap_pattern *bitmap;
8372     struct wmi_tlv *tlv;
8373     struct sk_buff *skb;
8374     u8 *ptr;
8375     size_t len;
8376 
8377     len = sizeof(*cmd) +
8378           sizeof(*tlv) +            /* array struct */
8379           sizeof(*bitmap) +         /* bitmap */
8380           sizeof(*tlv) +            /* empty ipv4 sync */
8381           sizeof(*tlv) +            /* empty ipv6 sync */
8382           sizeof(*tlv) +            /* empty magic */
8383           sizeof(*tlv) +            /* empty info timeout */
8384           sizeof(*tlv) + sizeof(u32);   /* ratelimit interval */
8385 
8386     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8387     if (!skb)
8388         return -ENOMEM;
8389 
8390     /* cmd */
8391     ptr = (u8 *)skb->data;
8392     cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
8393     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8394                      WMI_TAG_WOW_ADD_PATTERN_CMD) |
8395               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8396 
8397     cmd->vdev_id = vdev_id;
8398     cmd->pattern_id = pattern_id;
8399     cmd->pattern_type = WOW_BITMAP_PATTERN;
8400 
8401     ptr += sizeof(*cmd);
8402 
8403     /* bitmap */
8404     tlv = (struct wmi_tlv *)ptr;
8405     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8406                  WMI_TAG_ARRAY_STRUCT) |
8407               FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
8408 
8409     ptr += sizeof(*tlv);
8410 
8411     bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
8412     bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8413                     WMI_TAG_WOW_BITMAP_PATTERN_T) |
8414                  FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
8415 
8416     memcpy(bitmap->patternbuf, pattern, pattern_len);
8417     ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
8418     memcpy(bitmap->bitmaskbuf, mask, pattern_len);
8419     ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
8420     bitmap->pattern_offset = pattern_offset;
8421     bitmap->pattern_len = pattern_len;
8422     bitmap->bitmask_len = pattern_len;
8423     bitmap->pattern_id = pattern_id;
8424 
8425     ptr += sizeof(*bitmap);
8426 
8427     /* ipv4 sync */
8428     tlv = (struct wmi_tlv *)ptr;
8429     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8430                  WMI_TAG_ARRAY_STRUCT) |
8431               FIELD_PREP(WMI_TLV_LEN, 0);
8432 
8433     ptr += sizeof(*tlv);
8434 
8435     /* ipv6 sync */
8436     tlv = (struct wmi_tlv *)ptr;
8437     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8438                  WMI_TAG_ARRAY_STRUCT) |
8439               FIELD_PREP(WMI_TLV_LEN, 0);
8440 
8441     ptr += sizeof(*tlv);
8442 
8443     /* magic */
8444     tlv = (struct wmi_tlv *)ptr;
8445     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8446                  WMI_TAG_ARRAY_STRUCT) |
8447               FIELD_PREP(WMI_TLV_LEN, 0);
8448 
8449     ptr += sizeof(*tlv);
8450 
8451     /* pattern info timeout */
8452     tlv = (struct wmi_tlv *)ptr;
8453     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8454                  WMI_TAG_ARRAY_UINT32) |
8455               FIELD_PREP(WMI_TLV_LEN, 0);
8456 
8457     ptr += sizeof(*tlv);
8458 
8459     /* ratelimit interval */
8460     tlv = (struct wmi_tlv *)ptr;
8461     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8462                  WMI_TAG_ARRAY_UINT32) |
8463               FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
8464 
8465     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
8466            vdev_id, pattern_id, pattern_offset);
8467 
8468     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
8469 }
8470 
8471 int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
8472 {
8473     struct wmi_wow_del_pattern_cmd *cmd;
8474     struct sk_buff *skb;
8475     size_t len;
8476 
8477     len = sizeof(*cmd);
8478     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8479     if (!skb)
8480         return -ENOMEM;
8481 
8482     cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
8483     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8484                      WMI_TAG_WOW_DEL_PATTERN_CMD) |
8485               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8486 
8487     cmd->vdev_id = vdev_id;
8488     cmd->pattern_id = pattern_id;
8489     cmd->pattern_type = WOW_BITMAP_PATTERN;
8490 
8491     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
8492            vdev_id, pattern_id);
8493 
8494     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
8495 }
8496 
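     /* Build the NLO (PNO) start command: the fixed nlo config TLV is followed
      * by an array-of-structs TLV with one nlo_configured_parameters entry per
      * configured SSID and an array-of-uint32 TLV carrying the channel list of
      * the first network.
      */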
8497 static struct sk_buff *
8498 ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
8499                    u32 vdev_id,
8500                        struct wmi_pno_scan_req *pno)
8501 {
8502     struct nlo_configured_parameters *nlo_list;
8503     struct wmi_wow_nlo_config_cmd *cmd;
8504     struct wmi_tlv *tlv;
8505     struct sk_buff *skb;
8506     u32 *channel_list;
8507     size_t len, nlo_list_len, channel_list_len;
8508     u8 *ptr;
8509     u32 i;
8510 
8511     len = sizeof(*cmd) +
8512           sizeof(*tlv) +
8513           /* TLV place holder for array of structures
8514            * nlo_configured_parameters(nlo_list)
8515            */
8516           sizeof(*tlv);
8517           /* TLV place holder for array of uint32 channel_list */
8518 
8519     channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
8520     len += channel_list_len;
8521 
8522     nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
8523     len += nlo_list_len;
8524 
8525     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8526     if (!skb)
8527         return ERR_PTR(-ENOMEM);
8528 
8529     ptr = (u8 *)skb->data;
8530     cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
8531     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
8532               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8533 
8534     cmd->vdev_id = pno->vdev_id;
8535     cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
8536 
8537     /* current FW does not support min-max range for dwell time */
8538     cmd->active_dwell_time = pno->active_max_time;
8539     cmd->passive_dwell_time = pno->passive_max_time;
8540 
8541     if (pno->do_passive_scan)
8542         cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
8543 
8544     cmd->fast_scan_period = pno->fast_scan_period;
8545     cmd->slow_scan_period = pno->slow_scan_period;
8546     cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
8547     cmd->delay_start_time = pno->delay_start_time;
8548 
8549     if (pno->enable_pno_scan_randomization) {
8550         cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
8551                 WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
8552         ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
8553         ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
8554         ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
8555         ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
8556     }
8557 
8558     ptr += sizeof(*cmd);
8559 
8560     /* nlo_configured_parameters(nlo_list) */
8561     cmd->no_of_ssids = pno->uc_networks_count;
8562     tlv = (struct wmi_tlv *)ptr;
8563     tlv->header = FIELD_PREP(WMI_TLV_TAG,
8564                  WMI_TAG_ARRAY_STRUCT) |
8565               FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
8566 
8567     ptr += sizeof(*tlv);
8568     nlo_list = (struct nlo_configured_parameters *)ptr;
8569     for (i = 0; i < cmd->no_of_ssids; i++) {
8570         tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
8571         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
8572                   FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
8573 
8574         nlo_list[i].ssid.valid = true;
8575         nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
8576         memcpy(nlo_list[i].ssid.ssid.ssid,
8577                pno->a_networks[i].ssid.ssid,
8578                nlo_list[i].ssid.ssid.ssid_len);
8579         ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
8580                     roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
8581 
8582         if (pno->a_networks[i].rssi_threshold &&
8583             pno->a_networks[i].rssi_threshold > -300) {
8584             nlo_list[i].rssi_cond.valid = true;
8585             nlo_list[i].rssi_cond.rssi =
8586                 pno->a_networks[i].rssi_threshold;
8587         }
8588 
8589         nlo_list[i].bcast_nw_type.valid = true;
8590         nlo_list[i].bcast_nw_type.bcast_nw_type =
8591             pno->a_networks[i].bcast_nw_type;
8592     }
8593 
8594     ptr += nlo_list_len;
8595     cmd->num_of_channels = pno->a_networks[0].channel_count;
8596     tlv = (struct wmi_tlv *)ptr;
8597     tlv->header =  FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
8598                FIELD_PREP(WMI_TLV_LEN, channel_list_len);
8599     ptr += sizeof(*tlv);
8600     channel_list = (u32 *)ptr;
8601     for (i = 0; i < cmd->num_of_channels; i++)
8602         channel_list[i] = pno->a_networks[0].channels[i];
8603 
8604     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
8605            vdev_id);
8606 
8607     return skb;
8608 }
8609 
8610 static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
8611                              u32 vdev_id)
8612 {
8613     struct wmi_wow_nlo_config_cmd *cmd;
8614     struct sk_buff *skb;
8615     size_t len;
8616 
8617     len = sizeof(*cmd);
8618     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8619     if (!skb)
8620         return ERR_PTR(-ENOMEM);
8621 
8622     cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
8623     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
8624               FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
8625 
8626     cmd->vdev_id = vdev_id;
8627     cmd->flags = WMI_NLO_CONFIG_STOP;
8628 
8629     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
8630            "wmi tlv stop pno config vdev_id %d\n", vdev_id);
8631     return skb;
8632 }
8633 
8634 int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
8635                   struct wmi_pno_scan_req  *pno_scan)
8636 {
8637     struct sk_buff *skb;
8638 
8639     if (pno_scan->enable)
8640         skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
8641     else
8642         skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
8643 
8644     if (IS_ERR_OR_NULL(skb))
8645         return -ENOMEM;
8646 
8647     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
8648 }
8649 
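     /* Fill an array-of-structs TLV with NS offload tuples and advance *ptr
      * past it. With ext == false the first WMI_MAX_NS_OFFLOADS entries are
      * written; with ext == true the remaining tuples beyond
      * WMI_MAX_NS_OFFLOADS are written as the extended tuple array.
      */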
8650 static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
8651                        struct ath11k_arp_ns_offload *offload,
8652                        u8 **ptr,
8653                        bool enable,
8654                        bool ext)
8655 {
8656     struct wmi_ns_offload_tuple *ns;
8657     struct wmi_tlv *tlv;
8658     u8 *buf_ptr = *ptr;
8659     u32 ns_cnt, ns_ext_tuples;
8660     int i, max_offloads;
8661 
8662     ns_cnt = offload->ipv6_count;
8663 
8664     tlv  = (struct wmi_tlv *)buf_ptr;
8665 
8666     if (ext) {
8667         ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
8668         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
8669                   FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
8670         i = WMI_MAX_NS_OFFLOADS;
8671         max_offloads = offload->ipv6_count;
8672     } else {
8673         tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
8674                   FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
8675         i = 0;
8676         max_offloads = WMI_MAX_NS_OFFLOADS;
8677     }
8678 
8679     buf_ptr += sizeof(*tlv);
8680 
8681     for (; i < max_offloads; i++) {
8682         ns = (struct wmi_ns_offload_tuple *)buf_ptr;
8683         ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
8684                  FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
8685 
8686         if (enable) {
8687             if (i < ns_cnt)
8688                 ns->flags |= WMI_NSOL_FLAGS_VALID;
8689 
8690             memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
8691             memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
8692             ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
8693             ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
8694 
8695             if (offload->ipv6_type[i])
8696                 ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
8697 
8698             memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
8699             ath11k_ce_byte_swap(ns->target_mac.addr, 8);
8700 
8701             if (ns->target_mac.word0 != 0 ||
8702                 ns->target_mac.word1 != 0) {
8703                 ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
8704             }
8705 
8706             ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
8707                    "wmi index %d ns_solicited %pI6 target %pI6",
8708                    i, ns->solicitation_ipaddr,
8709                    ns->target_ipaddr[0]);
8710         }
8711 
8712         buf_ptr += sizeof(*ns);
8713     }
8714 
8715     *ptr = buf_ptr;
8716 }
8717 
8718 static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
8719                     struct ath11k_arp_ns_offload *offload,
8720                     u8 **ptr,
8721                     bool enable)
8722 {
8723     struct wmi_arp_offload_tuple *arp;
8724     struct wmi_tlv *tlv;
8725     u8 *buf_ptr = *ptr;
8726     int i;
8727 
8728     /* fill arp tuple */
8729     tlv = (struct wmi_tlv *)buf_ptr;
8730     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
8731               FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
8732     buf_ptr += sizeof(*tlv);
8733 
8734     for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
8735         arp = (struct wmi_arp_offload_tuple *)buf_ptr;
8736         arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
8737                   FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
8738 
8739         if (enable && i < offload->ipv4_count) {
8740             /* Copy the target ip addr and flags */
8741             arp->flags = WMI_ARPOL_FLAGS_VALID;
8742             memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
8743             ath11k_ce_byte_swap(arp->target_ipaddr, 4);
8744 
8745             ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
8746                    arp->target_ipaddr);
8747         }
8748 
8749         buf_ptr += sizeof(*arp);
8750     }
8751 
8752     *ptr = buf_ptr;
8753 }
8754 
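     /* Program ARP and NS offload for a vdev. The command always carries
      * WMI_MAX_NS_OFFLOADS NS tuples and WMI_MAX_ARP_OFFLOADS ARP tuples; if
      * more IPv6 addresses are configured, the excess is appended as extended
      * NS tuples and advertised in num_ns_ext_tuples.
      */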
8755 int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
8756                   struct ath11k_vif *arvif, bool enable)
8757 {
8758     struct ath11k_arp_ns_offload *offload;
8759     struct wmi_set_arp_ns_offload_cmd *cmd;
8760     struct wmi_tlv *tlv;
8761     struct sk_buff *skb;
8762     u8 *buf_ptr;
8763     size_t len;
8764     u8 ns_cnt, ns_ext_tuples = 0;
8765 
8766     offload = &arvif->arp_ns_offload;
8767     ns_cnt = offload->ipv6_count;
8768 
8769     len = sizeof(*cmd) +
8770           sizeof(*tlv) +
8771           WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
8772           sizeof(*tlv) +
8773           WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
8774 
8775     if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
8776         ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
8777         len += sizeof(*tlv) +
8778                ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
8779     }
8780 
8781     skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8782     if (!skb)
8783         return -ENOMEM;
8784 
8785     buf_ptr = skb->data;
8786     cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
8787     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8788                      WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
8789               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8790 
8791     cmd->flags = 0;
8792     cmd->vdev_id = arvif->vdev_id;
8793     cmd->num_ns_ext_tuples = ns_ext_tuples;
8794 
8795     buf_ptr += sizeof(*cmd);
8796 
8797     ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
8798     ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
8799 
8800     if (ns_ext_tuples)
8801         ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
8802 
8803     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
8804 }
8805 
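     /* Enable or disable GTK rekey offload for a vdev. When enabling, the
      * cached KCK, KEK and replay counter from the last rekey are programmed
      * into firmware; otherwise the offload is turned off.
      */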
8806 int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
8807                  struct ath11k_vif *arvif, bool enable)
8808 {
8809     struct wmi_gtk_rekey_offload_cmd *cmd;
8810     struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
8811     int len;
8812     struct sk_buff *skb;
8813     __le64 replay_ctr;
8814 
8815     len = sizeof(*cmd);
8816     skb =  ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8817     if (!skb)
8818         return -ENOMEM;
8819 
8820     cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
8821     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
8822               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8823 
8824     cmd->vdev_id = arvif->vdev_id;
8825 
8826     if (enable) {
8827         cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
8828 
8829         /* the key lengths in rekey_data and cmd are equal */
8830         memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
8831         ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
8832         memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
8833         ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
8834 
8835         replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
8836         memcpy(cmd->replay_ctr, &replay_ctr,
8837                sizeof(replay_ctr));
8838         ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
8839     } else {
8840         cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
8841     }
8842 
8843     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
8844            arvif->vdev_id, enable);
8845     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
8846 }
8847 
8848 int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
8849                  struct ath11k_vif *arvif)
8850 {
8851     struct wmi_gtk_rekey_offload_cmd *cmd;
8852     int len;
8853     struct sk_buff *skb;
8854 
8855     len = sizeof(*cmd);
8856     skb =  ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
8857     if (!skb)
8858         return -ENOMEM;
8859 
8860     cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
8861     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
8862               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8863 
8864     cmd->vdev_id = arvif->vdev_id;
8865     cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
8866 
8867     ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
8868            arvif->vdev_id);
8869     return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
8870 }
8871 
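     /* Send the BIOS-provided SAR power table to firmware: the fixed command
      * TLV is followed by a byte-array TLV carrying the BIOS_SAR_TABLE_LEN SAR
      * values and a second byte-array TLV reserving BIOS_SAR_RSVD1_LEN bytes.
      */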
8872 int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
8873 {
         struct ath11k_pdev_wmi *wmi = ar->wmi;
8874     struct wmi_pdev_set_sar_table_cmd *cmd;
8875     struct wmi_tlv *tlv;
8876     struct sk_buff *skb;
8877     u8 *buf_ptr;
8878     u32 len, sar_len_aligned, rsvd_len_aligned;
8879 
8880     sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
8881     rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
8882     len = sizeof(*cmd) +
8883           TLV_HDR_SIZE + sar_len_aligned +
8884           TLV_HDR_SIZE + rsvd_len_aligned;
8885 
8886     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
8887     if (!skb)
8888         return -ENOMEM;
8889 
8890     cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
8891     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
8892               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8893     cmd->pdev_id = ar->pdev->pdev_id;
8894     cmd->sar_len = BIOS_SAR_TABLE_LEN;
8895     cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
8896 
8897     buf_ptr = skb->data + sizeof(*cmd);
8898     tlv = (struct wmi_tlv *)buf_ptr;
8899     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
8900               FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
8901     buf_ptr += TLV_HDR_SIZE;
8902     memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
8903 
8904     buf_ptr += sar_len_aligned;
8905     tlv = (struct wmi_tlv *)buf_ptr;
8906     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
8907               FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
8908 
8909     return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
8910 }
8911 
8912 int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
8913 {
8914     struct ath11k_pdev_wmi *wmi = ar->wmi;
8915     struct wmi_pdev_set_geo_table_cmd *cmd;
8916     struct wmi_tlv *tlv;
8917     struct sk_buff *skb;
8918     u8 *buf_ptr;
8919     u32 len, rsvd_len_aligned;
8920 
8921     rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
8922     len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
8923 
8924     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
8925     if (!skb)
8926         return -ENOMEM;
8927 
8928     cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
8929     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
8930               FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8931     cmd->pdev_id = ar->pdev->pdev_id;
8932     cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
8933 
8934     buf_ptr = skb->data + sizeof(*cmd);
8935     tlv = (struct wmi_tlv *)buf_ptr;
8936     tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
8937               FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
8938 
8939     return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
8940 }
8941 
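     /* Configure the firmware STA keepalive mechanism. Space for the ARP
      * response TLV is always allocated after the fixed command, but it is
      * only populated for the ARP-based keepalive methods.
      */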
8942 int ath11k_wmi_sta_keepalive(struct ath11k *ar,
8943                  const struct wmi_sta_keepalive_arg *arg)
8944 {
8945     struct ath11k_pdev_wmi *wmi = ar->wmi;
8946     struct wmi_sta_keepalive_cmd *cmd;
8947     struct wmi_sta_keepalive_arp_resp *arp;
8948     struct sk_buff *skb;
8949     size_t len;
8950 
8951     len = sizeof(*cmd) + sizeof(*arp);
8952     skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
8953     if (!skb)
8954         return -ENOMEM;
8955 
8956     cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
8957     cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8958                      WMI_TAG_STA_KEEPALIVE_CMD) |
8959                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
8960     cmd->vdev_id = arg->vdev_id;
8961     cmd->enabled = arg->enabled;
8962     cmd->interval = arg->interval;
8963     cmd->method = arg->method;
8964 
8965     if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
8966         arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
8967         arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
8968         arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
8969                          WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) |
8970                  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
8971         arp->src_ip4_addr = arg->src_ip4_addr;
8972         arp->dest_ip4_addr = arg->dest_ip4_addr;
8973         ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
8974     }
8975 
8976     ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
8977            "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
8978            arg->vdev_id, arg->enabled, arg->method, arg->interval);
8979 
8980     return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
8981 }