// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
#include "fw/img.h"

/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
    int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
    if (WARN_ON(mvm->d3_test_active))
        return -EIO;
#endif

    /*
     * Synchronous commands from this op-mode must hold
     * the mutex; this ensures we don't try to send two
     * (or more) synchronous commands at a time.
     */
    if (!(cmd->flags & CMD_ASYNC))
        lockdep_assert_held(&mvm->mutex);

    ret = iwl_trans_send_cmd(mvm->trans, cmd);

    /*
     * If the caller wants the SKB, then don't hide any problems; the
     * caller might access the response buffer, which will be NULL if
     * the command failed.
     */
    if (cmd->flags & CMD_WANT_SKB)
        return ret;

    /*
     * Silently ignore failures if RFKILL is asserted or
     * we are in the suspend/resume process.
     */
    if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
        return 0;
    return ret;
}
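
/*
 * Illustrative sketch only (not from the original file): a hypothetical
 * synchronous caller. With CMD_ASYNC clear, mvm->mutex must be held, and
 * since CMD_WANT_SKB is not set, an -ERFKILL failure is reported as 0:
 *
 *	struct iwl_host_cmd hcmd = { .id = SOME_CMD_ID };
 *
 *	lockdep_assert_held(&mvm->mutex);
 *	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 *
 * SOME_CMD_ID is a placeholder, not a real command ID.
 */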

int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
             u32 flags, u16 len, const void *data)
{
    struct iwl_host_cmd cmd = {
        .id = id,
        .len = { len, },
        .data = { data, },
        .flags = flags,
    };

    return iwl_mvm_send_cmd(mvm, &cmd);
}

/*
 * We assume that the caller has set the status to the success value.
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                u32 *status)
{
    struct iwl_rx_packet *pkt;
    struct iwl_cmd_response *resp;
    int ret, resp_len;

    lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
    if (WARN_ON(mvm->d3_test_active))
        return -EIO;
#endif

    /*
     * Only synchronous commands can wait for status;
     * we set CMD_WANT_SKB ourselves, so the caller must not.
     */
    if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
              "cmd flags %x", cmd->flags))
        return -EINVAL;

    cmd->flags |= CMD_WANT_SKB;

    ret = iwl_trans_send_cmd(mvm->trans, cmd);
    if (ret == -ERFKILL) {
        /*
         * The command failed because of RFKILL; don't update
         * the status, leave it as success, and return 0.
         */
        return 0;
    } else if (ret) {
        return ret;
    }

    pkt = cmd->resp_pkt;

    resp_len = iwl_rx_packet_payload_len(pkt);
    if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
        ret = -EIO;
        goto out_free_resp;
    }

    resp = (void *)pkt->data;
    *status = le32_to_cpu(resp->status);
 out_free_resp:
    iwl_free_resp(cmd);
    return ret;
}
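
/*
 * Hypothetical usage sketch (not part of the original source): per the
 * comment above, the caller seeds *status with a success value before
 * the call, e.g.:
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	int ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
 *
 * ADD_STA_SUCCESS is just one firmware success value a caller might use;
 * on -ERFKILL the function returns 0 and leaves *status untouched.
 */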

/*
 * We assume that the caller has set the status to the success value.
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
                const void *data, u32 *status)
{
    struct iwl_host_cmd cmd = {
        .id = id,
        .len = { len, },
        .data = { data, },
    };

    return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}

int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
                      enum nl80211_band band)
{
    int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
    int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
    bool is_LB = band == NL80211_BAND_2GHZ;

    if (format == RATE_MCS_LEGACY_OFDM_MSK)
        return is_LB ? rate + IWL_FIRST_OFDM_RATE :
            rate;

    /* CCK is not allowed in HB */
    return is_LB ? rate : -1;
}
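
/*
 * Worked example (added for illustration, assuming the usual iwlwifi rate
 * table where the CCK rates come first and IWL_FIRST_OFDM_RATE starts the
 * OFDM block): an OFDM rate value of 0 maps to IWL_FIRST_OFDM_RATE on
 * 2.4 GHz, which has CCK entries before the OFDM ones, but to index 0 on
 * 5 GHz, which has none.
 */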

int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
                    enum nl80211_band band)
{
    int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
    int idx;
    int band_offset = 0;

    /* Legacy rate format, search for match in table */
    if (band != NL80211_BAND_2GHZ)
        band_offset = IWL_FIRST_OFDM_RATE;
    for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
        if (iwl_fw_rate_idx_to_plcp(idx) == rate)
            return idx - band_offset;

    return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
    if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
        /* In the new rate format, legacy rates are indexed:
         * 0 - 3 for CCK and 0 - 7 for OFDM.
         */
        return (rate_idx >= IWL_FIRST_OFDM_RATE ?
            rate_idx - IWL_FIRST_OFDM_RATE :
            rate_idx);

    return iwl_fw_rate_idx_to_plcp(rate_idx);
}
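
/*
 * Illustration (not in the original file): with a TX_CMD version > 8,
 * rate_idx == IWL_FIRST_OFDM_RATE + 1 yields 1, i.e. the second OFDM rate
 * in the new zero-based OFDM indexing; older versions go through the PLCP
 * lookup table instead.
 */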

u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
{
    static const u8 mac80211_ac_to_ucode_ac[] = {
        AC_VO,
        AC_VI,
        AC_BE,
        AC_BK
    };

    return mac80211_ac_to_ucode_ac[ac];
}
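
/*
 * Note added for clarity: mac80211 numbers the ACs VO = 0, VI = 1, BE = 2,
 * BK = 3 (enum ieee80211_ac_numbers), so the table above simply re-encodes
 * that ordering into the firmware's AC_* values, e.g. IEEE80211_AC_BE (2)
 * maps to AC_BE.
 */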

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
    struct iwl_rx_packet *pkt = rxb_addr(rxb);
    struct iwl_error_resp *err_resp = (void *)pkt->data;

    IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
        le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
    IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
        le16_to_cpu(err_resp->bad_cmd_seq_num),
        le32_to_cpu(err_resp->error_service));
    IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
        le64_to_cpu(err_resp->timestamp));
}

/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should also be a combination of ANT_[ABC].
 */
u8 first_antenna(u8 mask)
{
    BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
    if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
        return BIT(0);
    return BIT(ffs(mask) - 1);
}
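
/*
 * Example (illustrative): with mask == (ANT_B | ANT_C), ffs() finds bit 1
 * since ANT_B == BIT(1), so first_antenna() returns ANT_B.
 */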

#define MAX_ANT_NUM 2
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * In order to set it in the tx_cmd, must do BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
    u8 ind = last_idx;
    int i;

    for (i = 0; i < MAX_ANT_NUM; i++) {
        ind = (ind + 1) % MAX_ANT_NUM;
        if (valid & BIT(ind))
            return ind;
    }

    WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
    return last_idx;
}
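
/*
 * Example (illustrative): with valid == (BIT(0) | BIT(1)) the returned
 * indices simply alternate 0, 1, 0, ...; with valid == BIT(1) only, the
 * function returns 1 no matter what last_idx was.
 */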

/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: Driver data.
 * @lq: Link quality command to send.
 *
 * The link quality command is sent as the last step of station creation.
 * This is the special case in which init is set, and we call a callback
 * to clear the state indicating that station creation is in progress.
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
    struct iwl_host_cmd cmd = {
        .id = LQ_CMD,
        .len = { sizeof(struct iwl_lq_cmd), },
        .flags = CMD_ASYNC,
        .data = { lq, },
    };

    if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
            iwl_mvm_has_tlc_offload(mvm)))
        return -EINVAL;

    return iwl_mvm_send_cmd(mvm, &cmd);
}

/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @mvm: Driver data.
 * @vif: Pointer to the ieee80211_vif structure.
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The request to change the SMPS mode.
 *
 * Gets a request to change the SMPS mode,
 * and changes it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
             enum iwl_mvm_smps_type_request req_type,
             enum ieee80211_smps_mode smps_request)
{
    struct iwl_mvm_vif *mvmvif;
    enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
    int i;

    lockdep_assert_held(&mvm->mutex);

    /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
    if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
        return;

    if (vif->type != NL80211_IFTYPE_STATION)
        return;

    mvmvif = iwl_mvm_vif_from_mac80211(vif);
    mvmvif->smps_requests[req_type] = smps_request;
    for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
        if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
            smps_mode = IEEE80211_SMPS_STATIC;
            break;
        }
        if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
            smps_mode = IEEE80211_SMPS_DYNAMIC;
    }

    ieee80211_request_smps(vif, 0, smps_mode);
}
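
/*
 * Note added for clarity: the loop above gives IEEE80211_SMPS_STATIC
 * precedence over IEEE80211_SMPS_DYNAMIC (it breaks out as soon as any
 * requester wants STATIC), and either of them wins over the AUTOMATIC
 * default.
 */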

static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
                    struct iwl_rx_packet *pkt, void *data)
{
    WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);

    return true;
}

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
    struct iwl_statistics_cmd scmd = {
        .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
    };

    struct iwl_host_cmd cmd = {
        .id = STATISTICS_CMD,
        .len[0] = sizeof(scmd),
        .data[0] = &scmd,
    };
    int ret;

    /* From version 15 of STATISTICS_NOTIFICATION, the reply to
     * STATISTICS_CMD is empty, and the data arrives in a separate
     * STATISTICS_NOTIFICATION.
     */
    if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
                    STATISTICS_NOTIFICATION, 0) < 15) {
        cmd.flags = CMD_WANT_SKB;

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret)
            return ret;

        iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
        iwl_free_resp(&cmd);
    } else {
        struct iwl_notification_wait stats_wait;
        static const u16 stats_complete[] = {
            STATISTICS_NOTIFICATION,
        };

        iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
                       stats_complete, ARRAY_SIZE(stats_complete),
                       iwl_wait_stats_complete, NULL);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
            iwl_remove_notification(&mvm->notif_wait, &stats_wait);
            return ret;
        }

        /* 200ms should be enough for FW to collect data from all
         * LMACs and send STATISTICS_NOTIFICATION to host
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
        if (ret)
            return ret;
    }

    if (clear)
        iwl_mvm_accu_radio_stats(mvm);

    return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
    mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
    mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
    mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
    mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

struct iwl_mvm_diversity_iter_data {
    struct iwl_mvm_phy_ctxt *ctxt;
    bool result;
};

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
                   struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    struct iwl_mvm_diversity_iter_data *data = _data;
    int i;

    if (mvmvif->phy_ctxt != data->ctxt)
        return;

    for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
        if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
            mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
            data->result = false;
            break;
        }
    }
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
                  struct iwl_mvm_phy_ctxt *ctxt)
{
    struct iwl_mvm_diversity_iter_data data = {
        .ctxt = ctxt,
        .result = true,
    };

    lockdep_assert_held(&mvm->mutex);

    if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
        return false;

    if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
        return false;

    if (mvm->cfg->rx_with_siso_diversity)
        return false;

    ieee80211_iterate_active_interfaces_atomic(
            mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
            iwl_mvm_diversity_iter, &data);

    return data.result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
                  bool low_latency, u16 mac_id)
{
    struct iwl_mac_low_latency_cmd cmd = {
        .mac_id = cpu_to_le32(mac_id)
    };

    if (!fw_has_capa(&mvm->fw->ucode_capa,
             IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
        return;

    if (low_latency) {
        /* currently we don't care about the direction */
        cmd.low_latency_rx = 1;
        cmd.low_latency_tx = 1;
    }

    if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
                 0, sizeof(cmd), &cmd))
        IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                   bool low_latency,
                   enum iwl_mvm_low_latency_cause cause)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    int res;
    bool prev;

    lockdep_assert_held(&mvm->mutex);

    prev = iwl_mvm_vif_low_latency(mvmvif);
    iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);

    low_latency = iwl_mvm_vif_low_latency(mvmvif);

    if (low_latency == prev)
        return 0;

    iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

    res = iwl_mvm_update_quotas(mvm, false, NULL);
    if (res)
        return res;

    iwl_mvm_bt_coex_vif_change(mvm);

    return iwl_mvm_power_update_mac(mvm);
}

struct iwl_mvm_low_latency_iter {
    bool result;
    bool result_per_band[NUM_NL80211_BANDS];
};

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
    struct iwl_mvm_low_latency_iter *result = _data;
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    enum nl80211_band band;

    if (iwl_mvm_vif_low_latency(mvmvif)) {
        result->result = true;

        if (!mvmvif->phy_ctxt)
            return;

        band = mvmvif->phy_ctxt->channel->band;
        result->result_per_band[band] = true;
    }
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
    struct iwl_mvm_low_latency_iter data = {};

    ieee80211_iterate_active_interfaces_atomic(
            mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
            iwl_mvm_ll_iter, &data);

    return data.result;
}

bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
    struct iwl_mvm_low_latency_iter data = {};

    ieee80211_iterate_active_interfaces_atomic(
            mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
            iwl_mvm_ll_iter, &data);

    return data.result_per_band[band];
}

struct iwl_bss_iter_data {
    struct ieee80211_vif *vif;
    bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
                       struct ieee80211_vif *vif)
{
    struct iwl_bss_iter_data *data = _data;

    if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
        return;

    if (data->vif) {
        data->error = true;
        return;
    }

    data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
    struct iwl_bss_iter_data bss_iter_data = {};

    ieee80211_iterate_active_interfaces_atomic(
        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
        iwl_mvm_bss_iface_iterator, &bss_iter_data);

    if (bss_iter_data.error) {
        IWL_ERR(mvm, "More than one managed interface active!\n");
        return ERR_PTR(-EINVAL);
    }

    return bss_iter_data.vif;
}

struct iwl_bss_find_iter_data {
    struct ieee80211_vif *vif;
    u32 macid;
};

static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
                        struct ieee80211_vif *vif)
{
    struct iwl_bss_find_iter_data *data = _data;
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    if (mvmvif->id == data->macid)
        data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
{
    struct iwl_bss_find_iter_data data = {
        .macid = macid,
    };

    lockdep_assert_held(&mvm->mutex);

    ieee80211_iterate_active_interfaces_atomic(
        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
        iwl_mvm_bss_find_iface_iterator, &data);

    return data.vif;
}

struct iwl_sta_iter_data {
    bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
                       struct ieee80211_vif *vif)
{
    struct iwl_sta_iter_data *data = _data;

    if (vif->type != NL80211_IFTYPE_STATION)
        return;

    if (vif->cfg.assoc)
        data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
    struct iwl_sta_iter_data data = {
        .assoc = false,
    };

    ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                           IEEE80211_IFACE_ITER_NORMAL,
                           iwl_mvm_sta_iface_iterator,
                           &data);
    return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    bool tdls, bool cmd_q)
{
    struct iwl_fw_dbg_trigger_tlv *trigger;
    struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
    unsigned int default_timeout = cmd_q ?
        IWL_DEF_WD_TIMEOUT :
        mvm->trans->trans_cfg->base_params->wd_timeout;

    if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
        /*
         * We can't know when the station is asleep or awake, so we
         * must disable the queue hang detection.
         */
        if (fw_has_capa(&mvm->fw->ucode_capa,
                IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
            vif && vif->type == NL80211_IFTYPE_AP)
            return IWL_WATCHDOG_DISABLED;
        return default_timeout;
    }

    trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
    txq_timer = (void *)trigger->data;

    if (tdls)
        return le32_to_cpu(txq_timer->tdls);

    if (cmd_q)
        return le32_to_cpu(txq_timer->command_queue);

    if (WARN_ON(!vif))
        return default_timeout;

    switch (ieee80211_vif_type_p2p(vif)) {
    case NL80211_IFTYPE_ADHOC:
        return le32_to_cpu(txq_timer->ibss);
    case NL80211_IFTYPE_STATION:
        return le32_to_cpu(txq_timer->bss);
    case NL80211_IFTYPE_AP:
        return le32_to_cpu(txq_timer->softap);
    case NL80211_IFTYPE_P2P_CLIENT:
        return le32_to_cpu(txq_timer->p2p_client);
    case NL80211_IFTYPE_P2P_GO:
        return le32_to_cpu(txq_timer->p2p_go);
    case NL80211_IFTYPE_P2P_DEVICE:
        return le32_to_cpu(txq_timer->p2p_device);
    case NL80211_IFTYPE_MONITOR:
        return default_timeout;
    default:
        WARN_ON(1);
        return mvm->trans->trans_cfg->base_params->wd_timeout;
    }
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                 const char *errmsg)
{
    struct iwl_fw_dbg_trigger_tlv *trig;
    struct iwl_fw_dbg_trigger_mlme *trig_mlme;

    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
                     FW_DBG_TRIGGER_MLME);
    if (!trig)
        goto out;

    trig_mlme = (void *)trig->data;

    if (trig_mlme->stop_connection_loss &&
        --trig_mlme->stop_connection_loss)
        goto out;

    iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
    ieee80211_connection_loss(vif);
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
                      struct ieee80211_vif *vif,
                      const struct ieee80211_sta *sta,
                      u16 tid)
{
    struct iwl_fw_dbg_trigger_tlv *trig;
    struct iwl_fw_dbg_trigger_ba *ba_trig;

    trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
                     FW_DBG_TRIGGER_BA);
    if (!trig)
        return;

    ba_trig = (void *)trig->data;

    if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
        return;

    iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
                "Frame from %pM timed out, tid %d",
                sta->addr, tid);
}

u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
    if (!elapsed)
        return 0;

    return (100 * airtime / elapsed) / USEC_PER_MSEC;
}
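
/*
 * Note on units (added for clarity, based on how the TCM code below passes
 * values): airtime is in usec while elapsed is in msec, hence the extra
 * divide by USEC_PER_MSEC. E.g. airtime = 50000 usec over elapsed = 1000
 * msec gives (100 * 50000 / 1000) / 1000 = 5, i.e. a 5% load.
 */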

static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
    u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);

    if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
        return IWL_MVM_TRAFFIC_HIGH;
    if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
        return IWL_MVM_TRAFFIC_MEDIUM;

    return IWL_MVM_TRAFFIC_LOW;
}

static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
    struct iwl_mvm *mvm = _data;
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;

    if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
        return;

    low_latency = mvm->tcm.result.low_latency[mvmvif->id];

    if (!mvm->tcm.result.change[mvmvif->id] &&
        prev == low_latency) {
        iwl_mvm_update_quotas(mvm, false, NULL);
        return;
    }

    if (prev != low_latency) {
        /* this sends traffic load and updates quota as well */
        iwl_mvm_update_low_latency(mvm, vif, low_latency,
                       LOW_LATENCY_TRAFFIC);
    } else {
        iwl_mvm_update_quotas(mvm, false, NULL);
    }
}

static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
    mutex_lock(&mvm->mutex);

    ieee80211_iterate_active_interfaces(
        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
        iwl_mvm_tcm_iter, mvm);

    if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
        iwl_mvm_config_scan(mvm);

    mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
    struct iwl_mvm *mvm;
    struct iwl_mvm_vif *mvmvif;
    struct ieee80211_vif *vif;

    mvmvif = container_of(wk, struct iwl_mvm_vif,
                  uapsd_nonagg_detected_wk.work);
    vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
    mvm = mvmvif->mvm;

    if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
        return;

    /* remember that this AP is broken */
    memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
           vif->bss_conf.bssid, ETH_ALEN);
    mvm->uapsd_noagg_bssid_write_idx++;
    if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
        mvm->uapsd_noagg_bssid_write_idx = 0;

    iwl_mvm_connection_loss(mvm, vif,
                "AP isn't using AMPDU with uAPSD enabled");
}

static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
                     struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    if (vif->type != NL80211_IFTYPE_STATION)
        return;

    if (!vif->cfg.assoc)
        return;

    if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
        !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
        !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
        !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
        return;

    if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
        return;

    mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
    IWL_INFO(mvm,
         "detected AP should do aggregation but isn't, likely due to U-APSD\n");
    schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
}

static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
                         unsigned int elapsed,
                         int mac)
{
    u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
    u64 tpt;
    unsigned long rate;
    struct ieee80211_vif *vif;

    rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);

    if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
        mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
        return;

    if (iwl_mvm_has_new_rx_api(mvm)) {
        tpt = 8 * bytes; /* kbps */
        do_div(tpt, elapsed);
        rate *= 1000; /* kbps */
        if (tpt < 22 * rate / 100)
            return;
    } else {
        /*
         * the rate here is actually the threshold, in 100Kbps units,
         * so do the needed conversion from bytes to 100Kbps:
         * 100kb = bits / (100 * 1000),
         * 100kbps = 100kb / (msecs / 1000) ==
         *           (bits / (100 * 1000)) / (msecs / 1000) ==
         *           bits / (100 * msecs)
         */
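        /*
         * Hypothetical worked example: bytes = 2,500,000 over
         * elapsed = 1000 msec gives tpt = 8 * 2,500,000 / (1000 * 100)
         * = 200, i.e. 20 Mbps expressed in 100 kbps units, compared
         * directly against the threshold below.
         */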
        tpt = (8 * bytes);
        do_div(tpt, elapsed * 100);
        if (tpt < rate)
            return;
    }

    rcu_read_lock();
    vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
    if (vif)
        iwl_mvm_uapsd_agg_disconnect(mvm, vif);
    rcu_read_unlock();
}

static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
                 struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
    u32 *band = _data;

    if (!mvmvif->phy_ctxt)
        return;

    band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
}

static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
                        unsigned long ts,
                        bool handle_uapsd)
{
    unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
    unsigned int uapsd_elapsed =
        jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
    u32 total_airtime = 0;
    u32 band_airtime[NUM_NL80211_BANDS] = {0};
    u32 band[NUM_MAC_INDEX_DRIVER] = {0};
    int ac, mac, i;
    bool low_latency = false;
    enum iwl_mvm_traffic_load load, band_load;
    bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);

    if (handle_ll)
        mvm->tcm.ll_ts = ts;
    if (handle_uapsd)
        mvm->tcm.uapsd_nonagg_ts = ts;

    mvm->tcm.result.elapsed = elapsed;

    ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                           IEEE80211_IFACE_ITER_NORMAL,
                           iwl_mvm_tcm_iterator,
                           &band);

    for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
        struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
        u32 vo_vi_pkts = 0;
        u32 airtime = mdata->rx.airtime + mdata->tx.airtime;

        total_airtime += airtime;
        band_airtime[band[mac]] += airtime;

        load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
        mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
        mvm->tcm.result.load[mac] = load;
        mvm->tcm.result.airtime[mac] = airtime;

        for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
            vo_vi_pkts += mdata->rx.pkts[ac] +
                      mdata->tx.pkts[ac];

        /* enable immediately with enough packets but defer disabling */
        if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
            mvm->tcm.result.low_latency[mac] = true;
        else if (handle_ll)
            mvm->tcm.result.low_latency[mac] = false;

        if (handle_ll) {
            /* clear old data */
            memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
            memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
        }
        low_latency |= mvm->tcm.result.low_latency[mac];

        if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
            iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
                                 mac);
        /* clear old data */
        if (handle_uapsd)
            mdata->uapsd_nonagg_detect.rx_bytes = 0;
        memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
        memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
    }

    load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
    mvm->tcm.result.global_load = load;

    for (i = 0; i < NUM_NL80211_BANDS; i++) {
        band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
        mvm->tcm.result.band_load[i] = band_load;
    }

    /*
     * If the current load isn't low we need to force re-evaluation
     * in the TCM period, so that we can return to low load if there
     * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
     * triggered by traffic).
     */
    if (load != IWL_MVM_TRAFFIC_LOW)
        return MVM_TCM_PERIOD;
    /*
     * If low-latency is active we need to force re-evaluation after
     * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
     * when there's no traffic at all.
     */
    if (low_latency)
        return MVM_LL_PERIOD;
    /*
     * Otherwise, we don't need to run the work struct because we're
     * in the default "idle" state - traffic indication is low (which
     * also covers the "no traffic" case) and low-latency is disabled
     * so there's no state that may need to be disabled when there's
     * no traffic at all.
     *
     * Note that this has no impact on the regular scheduling of the
     * updates triggered by traffic - those happen whenever one of the
     * two timeouts expire (if there's traffic at all.)
     */
    return 0;
}

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
    unsigned long ts = jiffies;
    bool handle_uapsd =
        time_after(ts, mvm->tcm.uapsd_nonagg_ts +
                   msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

    spin_lock(&mvm->tcm.lock);
    if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
        spin_unlock(&mvm->tcm.lock);
        return;
    }
    spin_unlock(&mvm->tcm.lock);

    if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
        mutex_lock(&mvm->mutex);
        if (iwl_mvm_request_statistics(mvm, true))
            handle_uapsd = false;
        mutex_unlock(&mvm->mutex);
    }

    spin_lock(&mvm->tcm.lock);
    /* re-check if somebody else won the recheck race */
    if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
        /* calculate statistics */
        unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
                                  handle_uapsd);

        /* the memset needs to be visible before the timestamp */
        smp_mb();
        mvm->tcm.ts = ts;
        if (work_delay)
            schedule_delayed_work(&mvm->tcm.work, work_delay);
    }
    spin_unlock(&mvm->tcm.lock);

    iwl_mvm_tcm_results(mvm);
}

void iwl_mvm_tcm_work(struct work_struct *work)
{
    struct delayed_work *delayed_work = to_delayed_work(work);
    struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
                       tcm.work);

    iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
    spin_lock_bh(&mvm->tcm.lock);
    mvm->tcm.paused = true;
    spin_unlock_bh(&mvm->tcm.lock);
    if (with_cancel)
        cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
    int mac;
    bool low_latency = false;

    spin_lock_bh(&mvm->tcm.lock);
    mvm->tcm.ts = jiffies;
    mvm->tcm.ll_ts = jiffies;
    for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
        struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

        memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
        memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
        memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
        memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));

        if (mvm->tcm.result.low_latency[mac])
            low_latency = true;
    }
    /* The TCM data needs to be reset before the "paused" flag changes */
    smp_mb();
    mvm->tcm.paused = false;

    /*
     * If the current load is not low or low latency is active, force
     * re-evaluation to cover the case of no traffic.
     */
    if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
        schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
    else if (low_latency)
        schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);

    spin_unlock_bh(&mvm->tcm.lock);
}

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
              iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
    struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

    cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}

u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
    u32 reg_addr = DEVICE_SYSTEM_TIME_REG;

    if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
        mvm->trans->cfg->gp2_reg_addr)
        reg_addr = mvm->trans->cfg->gp2_reg_addr;

    return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
               u32 *gp2, u64 *boottime, ktime_t *realtime)
{
    bool ps_disabled;

    lockdep_assert_held(&mvm->mutex);

    /* Disable power save when reading GP2 */
    ps_disabled = mvm->ps_disabled;
    if (!ps_disabled) {
        mvm->ps_disabled = true;
        iwl_mvm_power_update_device(mvm);
    }

    *gp2 = iwl_mvm_get_systime(mvm);

    if (clock_type == CLOCK_BOOTTIME && boottime)
        *boottime = ktime_get_boottime_ns();
    else if (clock_type == CLOCK_REALTIME && realtime)
        *realtime = ktime_get_real();

    if (!ps_disabled) {
        mvm->ps_disabled = ps_disabled;
        iwl_mvm_power_update_device(mvm);
    }
}