#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

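/* struct wmi_ops is the per-firmware-interface vtable: each supported WMI
 * command set (for example the non-TLV and TLV interfaces) provides its own
 * implementation. An op left NULL simply means the feature is not available
 * on that interface; the inline wrappers below translate a NULL op into
 * -EOPNOTSUPP instead of calling through it.
 */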
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
		struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
		int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
		struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
		const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
		u16 rd5g, u16 ctl2g, u16 ctl5g,
		enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
		u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
		const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
		const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
		enum wmi_vdev_type type,
		enum wmi_vdev_subtype subtype,
		const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg,
		bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
		const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
		u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
		const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
		const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
		u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
		const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
		const u8 *peer_addr,
		enum wmi_peer_param param_id,
		u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
		const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_powersave_param param_id,
		u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac,
		enum wmi_ap_ps_peer_param param_id,
		u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
		const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
		u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
		const void *bcn, size_t bcn_len,
		u32 bcn_paddr, bool dtim_zero,
		bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
		const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
		u32 vdev_id,
		enum wmi_peer_stats_info_request_type type,
		u8 *addr,
		u32 reset);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
		enum wmi_force_fw_hang_type type,
		u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
		struct sk_buff *skb,
		dma_addr_t paddr);
	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
		u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
		u32 period, u32 duration,
		u32 next_offset,
		u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid,
		u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
		const u8 *mac, u32 tid, u32 initiator,
		u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
		u32 tim_ie_offset, struct sk_buff *bcn,
		u32 prb_caps, u32 prb_erp,
		void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
		struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
		const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		const struct wmi_sta_uapsd_auto_trig_arg *args,
		u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
		const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
		enum wmi_wow_wakeup_event event,
		u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
		u32 pattern_id,
		const u8 *pattern,
		const u8 *mask,
		int pattern_len,
		int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
		u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
		u32 vdev_id,
		enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
		const struct wmi_tdls_peer_update_cmd_arg *arg,
		const struct wmi_tdls_peer_capab_arg *cap,
		const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
		(struct ath10k *ar,
		 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
		u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
		struct ath10k_fw_stats *fw_stats,
		char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
		u8 enable,
		u32 detect_level,
		u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
		enum wmi_host_platform_type type,
		u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
		enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
		u32 vdev_id,
		struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
		(struct ath10k *ar,
		 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
		u32 param);
	struct sk_buff *(*gen_bb_timing)
		(struct ath10k *ar,
		 const struct wmi_bb_timing_cfg_arg *arg);
	struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
		const struct wmi_per_peer_per_tid_cfg_arg *arg);

};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

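/* Almost every wrapper below follows the same pattern: check that the op
 * exists, let the gen_*() op build a command skb (or return an ERR_PTR),
 * then hand the skb to ath10k_wmi_cmd_send() together with the
 * interface-specific command id from ar->wmi.cmd. A minimal sketch of the
 * shape, using a hypothetical "foo" command purely for illustration:
 *
 *	static inline int ath10k_wmi_foo(struct ath10k *ar, u32 arg)
 *	{
 *		struct sk_buff *skb;
 *
 *		if (!ar->wmi.ops->gen_foo)
 *			return -EOPNOTSUPP;
 *
 *		skb = ar->wmi.ops->gen_foo(ar, arg);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 *	}
 */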
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
		struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

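/* Unlike the other wrappers this one returns an enum rather than an errno:
 * a missing op is reported as WMI_TXBF_CONF_UNSUPPORTED so callers can use
 * the value directly.
 */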
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
{
	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
		return -EOPNOTSUPP;

	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
}

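/* There are two management TX paths: gen_mgmt_tx_send() passes the frame by
 * DMA address so the firmware can fetch it directly (completion is then
 * reported via the mgmt_tx_compl events pulled above), while gen_mgmt_tx()
 * copies the frame into the WMI command itself.
 */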
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
		dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

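	/* No per-frame TX completion is consumed on this path, so the frame
	 * is optimistically reported back to mac80211 as ACKed once the WMI
	 * command has been queued.
	 */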
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
		u16 ctl2g, u16 ctl5g,
		enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
			dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		enum wmi_vdev_type type,
		enum wmi_vdev_subtype subtype,
		const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
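
/* For reference, a typical (simplified) vdev life cycle in the driver uses
 * the wrappers above roughly in this order; error handling is omitted and
 * the exact sequence depends on the interface type:
 *
 *	ath10k_wmi_vdev_create(ar, vdev_id, type, subtype, macaddr);
 *	ath10k_wmi_vdev_start(ar, &start_arg);
 *	ath10k_wmi_vdev_up(ar, vdev_id, aid, bssid);
 *	...
 *	ath10k_wmi_vdev_down(ar, vdev_id);
 *	ath10k_wmi_vdev_stop(ar, vdev_id);
 *	ath10k_wmi_vdev_delete(ar, vdev_id);
 */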

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
		u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
		const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
		const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
			enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		const struct wmi_sta_uapsd_auto_trig_arg *args,
		u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
			num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
		enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
		const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

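/* The probe request OUI is the first three octets of the MAC address packed
 * into the low 24 bits of a u32, most significant octet first. For example,
 * a (hypothetical) address 00:03:7f:xx:xx:xx would be passed down as
 * prob_req_oui = 0x00037f.
 */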
static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		(((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

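/* Beacons are sent with ath10k_wmi_cmd_send_nowait() since this runs in the
 * beacon tx path; unlike ath10k_wmi_cmd_send() the nowait variant does not
 * appear to consume the skb on failure, hence the explicit dev_kfree_skb()
 * below.
 */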
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
		const void *bcn, size_t bcn_len,
		u32 bcn_paddr, bool dtim_zero,
		bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
			dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
			ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
		u32 vdev_id,
		enum wmi_peer_stats_info_request_type type,
		u8 *addr,
		u32 reset)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_peer_stats_info)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
			vdev_id,
			type,
			addr,
			reset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->request_peer_stats_info_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
		enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
		u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
			next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
			reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
			prb_caps, prb_erp, prb_ies,
			prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
		const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
		enum wmi_wow_wakeup_event event,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
		const u8 *pattern, const u8 *mask,
		int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
			pattern, mask, pattern_len,
			pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
		struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
		enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
		const struct wmi_tdls_peer_update_cmd_arg *arg,
		const struct wmi_tdls_peer_capab_arg *cap,
		const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
		char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
		u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
			detect_level,
			detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
		enum wmi_host_platform_type type,
		u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
			fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
		enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
		const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
		const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->set_bb_timing_cmdid);
}

static inline int
ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
		const struct wmi_per_peer_per_tid_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->per_peer_per_tid_config_cmdid);
}

#endif