0001 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
0002 /*
0003  * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
0004  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
0005  * Copyright (C) 2016-2017 Intel Deutschland GmbH
0006  */
0007 #include <net/mac80211.h>
0008 
0009 #include "mvm.h"
0010 #include "sta.h"
0011 #include "rs.h"
0012 
0013 /*
0014  * New versions of the ADD_STA command added new fields at the end of the
0015  * structure, so sending the size of the relevant API's structure is enough to
0016  * support both API versions.
0017  */
0018 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
0019 {
0020     if (iwl_mvm_has_new_rx_api(mvm) ||
0021         fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
0022         return sizeof(struct iwl_mvm_add_sta_cmd);
0023     else
0024         return sizeof(struct iwl_mvm_add_sta_cmd_v7);
0025 }
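/*
 * Illustrative usage (a sketch mirroring the ADD_STA sends later in this
 * file): passing iwl_mvm_add_sta_cmd_size() as the payload length lets
 * firmware that only understands the older v7 layout parse the command:
 *
 *	u32 status = ADD_STA_SUCCESS;
 *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 *					      iwl_mvm_add_sta_cmd_size(mvm),
 *					      &add_sta_cmd, &status);
 */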
0026 
0027 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
0028                     enum nl80211_iftype iftype)
0029 {
0030     int sta_id;
0031     u32 reserved_ids = 0;
0032 
0033     BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
0034     WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
0035 
0036     lockdep_assert_held(&mvm->mutex);
0037 
0038     /* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
0039     if (iftype != NL80211_IFTYPE_STATION)
0040         reserved_ids = BIT(0);
0041 
0042     /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
0043     for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
0044         if (BIT(sta_id) & reserved_ids)
0045             continue;
0046 
0047         if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
0048                            lockdep_is_held(&mvm->mutex)))
0049             return sta_id;
0050     }
0051     return IWL_MVM_INVALID_STA;
0052 }
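/*
 * Typical caller pattern (a sketch - the real callers are outside this
 * section): the id is allocated with mvm->mutex held and stays free
 * until a station is published in fw_id_to_mac_id:
 *
 *	lockdep_assert_held(&mvm->mutex);
 *	sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
 *	if (sta_id == IWL_MVM_INVALID_STA)
 *		return -ENOSPC;
 */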
0053 
0054 /* send station add/update command to firmware */
0055 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
0056                bool update, unsigned int flags)
0057 {
0058     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
0059     struct iwl_mvm_add_sta_cmd add_sta_cmd = {
0060         .sta_id = mvm_sta->sta_id,
0061         .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
0062         .add_modify = update ? 1 : 0,
0063         .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
0064                          STA_FLG_MIMO_EN_MSK |
0065                          STA_FLG_RTS_MIMO_PROT),
0066         .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
0067     };
0068     int ret;
0069     u32 status;
0070     u32 agg_size = 0, mpdu_dens = 0;
0071 
0072     if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
0073         add_sta_cmd.station_type = mvm_sta->sta_type;
0074 
0075     if (!update || (flags & STA_MODIFY_QUEUES)) {
0076         memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
0077 
0078         if (!iwl_mvm_has_new_tx_api(mvm)) {
0079             add_sta_cmd.tfd_queue_msk =
0080                 cpu_to_le32(mvm_sta->tfd_queue_msk);
0081 
0082             if (flags & STA_MODIFY_QUEUES)
0083                 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
0084         } else {
0085             WARN_ON(flags & STA_MODIFY_QUEUES);
0086         }
0087     }
0088 
0089     switch (sta->deflink.bandwidth) {
0090     case IEEE80211_STA_RX_BW_320:
0091     case IEEE80211_STA_RX_BW_160:
0092         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
0093         fallthrough;
0094     case IEEE80211_STA_RX_BW_80:
0095         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
0096         fallthrough;
0097     case IEEE80211_STA_RX_BW_40:
0098         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
0099         fallthrough;
0100     case IEEE80211_STA_RX_BW_20:
0101         if (sta->deflink.ht_cap.ht_supported)
0102             add_sta_cmd.station_flags |=
0103                 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
0104         break;
0105     }
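    /*
     * Note: the fallthrough cases above accumulate flags, so e.g. a
     * station with 160 MHz RX bandwidth ends up with FAT_EN_160MHZ,
     * FAT_EN_80MHZ and FAT_EN_40MHZ all set (plus FAT_EN_20MHZ if HT
     * is supported).
     */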
0106 
0107     switch (sta->deflink.rx_nss) {
0108     case 1:
0109         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
0110         break;
0111     case 2:
0112         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
0113         break;
0114     case 3 ... 8:
0115         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
0116         break;
0117     }
0118 
0119     switch (sta->smps_mode) {
0120     case IEEE80211_SMPS_AUTOMATIC:
0121     case IEEE80211_SMPS_NUM_MODES:
0122         WARN_ON(1);
0123         break;
0124     case IEEE80211_SMPS_STATIC:
0125         /* override NSS */
0126         add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
0127         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
0128         break;
0129     case IEEE80211_SMPS_DYNAMIC:
0130         add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
0131         break;
0132     case IEEE80211_SMPS_OFF:
0133         /* nothing */
0134         break;
0135     }
0136 
0137     if (sta->deflink.ht_cap.ht_supported) {
0138         add_sta_cmd.station_flags_msk |=
0139             cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
0140                     STA_FLG_AGG_MPDU_DENS_MSK);
0141 
0142         mpdu_dens = sta->deflink.ht_cap.ampdu_density;
0143     }
0144 
0145     if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
0146         add_sta_cmd.station_flags_msk |=
0147             cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
0148                     STA_FLG_AGG_MPDU_DENS_MSK);
0149 
0150         mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
0151                       IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
0152         agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
0153                      IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
0154     } else if (sta->deflink.vht_cap.vht_supported) {
0155         agg_size = sta->deflink.vht_cap.cap &
0156             IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
0157         agg_size >>=
0158             IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
0159     } else if (sta->deflink.ht_cap.ht_supported) {
0160         agg_size = sta->deflink.ht_cap.ampdu_factor;
0161     }
0162 
0163     /* D6.0 10.12.2 A-MPDU length limit rules
0164  * A STA indicates the maximum length of the A-MPDU pre-EOF padding
0165  * that it can receive in an HE PPDU in the Maximum A-MPDU Length
0166  * Exponent field in its HT Capabilities, VHT Capabilities,
0167  * and HE 6 GHz Band Capabilities elements (if present) and the
0168  * Maximum A-MPDU Length Exponent Extension field in its HE
0169  * Capabilities element.
0170      */
0171     if (sta->deflink.he_cap.has_he)
0172         agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
0173                     IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
0174 
0175     /* Limit to max A-MPDU supported by FW */
0176     if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
0177         agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
0178                 STA_FLG_MAX_AGG_SIZE_SHIFT);
0179 
0180     add_sta_cmd.station_flags |=
0181         cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
0182     add_sta_cmd.station_flags |=
0183         cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
0184     if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
0185         add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
0186 
0187     if (sta->wme) {
0188         add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
0189 
0190         if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
0191             add_sta_cmd.uapsd_acs |= BIT(AC_BK);
0192         if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
0193             add_sta_cmd.uapsd_acs |= BIT(AC_BE);
0194         if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
0195             add_sta_cmd.uapsd_acs |= BIT(AC_VI);
0196         if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
0197             add_sta_cmd.uapsd_acs |= BIT(AC_VO);
0198         add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
0199         add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
0200     }
0201 
0202     status = ADD_STA_SUCCESS;
0203     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
0204                       iwl_mvm_add_sta_cmd_size(mvm),
0205                       &add_sta_cmd, &status);
0206     if (ret)
0207         return ret;
0208 
0209     switch (status & IWL_ADD_STA_STATUS_MASK) {
0210     case ADD_STA_SUCCESS:
0211         IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
0212         break;
0213     default:
0214         ret = -EIO;
0215         IWL_ERR(mvm, "ADD_STA failed\n");
0216         break;
0217     }
0218 
0219     return ret;
0220 }
0221 
0222 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
0223 {
0224     struct iwl_mvm_baid_data *data =
0225         from_timer(data, t, session_timer);
0226     struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
0227     struct iwl_mvm_baid_data *ba_data;
0228     struct ieee80211_sta *sta;
0229     struct iwl_mvm_sta *mvm_sta;
0230     unsigned long timeout;
0231 
0232     rcu_read_lock();
0233 
0234     ba_data = rcu_dereference(*rcu_ptr);
0235 
0236     if (WARN_ON(!ba_data))
0237         goto unlock;
0238 
0239     if (!ba_data->timeout)
0240         goto unlock;
0241 
0242     timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
0243     if (time_is_after_jiffies(timeout)) {
0244         mod_timer(&ba_data->session_timer, timeout);
0245         goto unlock;
0246     }
0247 
0248     /* Timer expired */
0249     sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
0250 
0251     /*
0252      * sta should be valid unless the following happens:
0253      * The firmware asserts which triggers a reconfig flow, but
0254      * the reconfig fails before we set the pointer to sta into
0255      * the fw_id_to_mac_id pointer table. Mac80211 can't stop
0256      * A-MPDU and hence the timer continues to run. Then, the
0257      * timer expires and sta is NULL.
0258      */
0259     if (!sta)
0260         goto unlock;
0261 
0262     mvm_sta = iwl_mvm_sta_from_mac80211(sta);
0263     ieee80211_rx_ba_timer_expired(mvm_sta->vif,
0264                       sta->addr, ba_data->tid);
0265 unlock:
0266     rcu_read_unlock();
0267 }
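/*
 * A sketch of the arming side (the BA-session setup itself is outside
 * this section; names follow this file): the timer is initialized with
 * timer_setup() so that from_timer() above can recover the baid data,
 * and is (re)armed against twice the negotiated BA timeout:
 *
 *	timer_setup(&baid_data->session_timer,
 *		    iwl_mvm_rx_agg_session_expired, 0);
 *	mod_timer(&baid_data->session_timer,
 *		  jiffies + TU_TO_JIFFIES(baid_data->timeout * 2));
 */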
0268 
0269 /* Disable aggregations for a bitmap of TIDs for a given station */
0270 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
0271                     unsigned long disable_agg_tids,
0272                     bool remove_queue)
0273 {
0274     struct iwl_mvm_add_sta_cmd cmd = {};
0275     struct ieee80211_sta *sta;
0276     struct iwl_mvm_sta *mvmsta;
0277     u32 status;
0278     u8 sta_id;
0279 
0280     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0281         return -EINVAL;
0282 
0283     sta_id = mvm->queue_info[queue].ra_sta_id;
0284 
0285     rcu_read_lock();
0286 
0287     sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
0288 
0289     if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
0290         rcu_read_unlock();
0291         return -EINVAL;
0292     }
0293 
0294     mvmsta = iwl_mvm_sta_from_mac80211(sta);
0295 
0296     mvmsta->tid_disable_agg |= disable_agg_tids;
0297 
0298     cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
0299     cmd.sta_id = mvmsta->sta_id;
0300     cmd.add_modify = STA_MODE_MODIFY;
0301     cmd.modify_mask = STA_MODIFY_QUEUES;
0302     if (disable_agg_tids)
0303         cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
0304     if (remove_queue)
0305         cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
0306     cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
0307     cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
0308 
0309     rcu_read_unlock();
0310 
0311     /* Notify FW of queue removal from the STA queues */
0312     status = ADD_STA_SUCCESS;
0313     return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
0314                        iwl_mvm_add_sta_cmd_size(mvm),
0315                        &cmd, &status);
0316 }
0317 
0318 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
0319                    u16 *queueptr, u8 tid)
0320 {
0321     int queue = *queueptr;
0322     struct iwl_scd_txq_cfg_cmd cmd = {
0323         .scd_queue = queue,
0324         .action = SCD_CFG_DISABLE_QUEUE,
0325     };
0326     int ret;
0327 
0328     lockdep_assert_held(&mvm->mutex);
0329 
0330     if (iwl_mvm_has_new_tx_api(mvm)) {
0331         if (mvm->sta_remove_requires_queue_remove) {
0332             u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
0333                          SCD_QUEUE_CONFIG_CMD);
0334             struct iwl_scd_queue_cfg_cmd remove_cmd = {
0335                 .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
0336                 .u.remove.queue = cpu_to_le32(queue),
0337             };
0338 
0339             ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
0340                            sizeof(remove_cmd),
0341                            &remove_cmd);
0342         } else {
0343             ret = 0;
0344         }
0345 
0346         iwl_trans_txq_free(mvm->trans, queue);
0347         *queueptr = IWL_MVM_INVALID_QUEUE;
0348 
0349         return ret;
0350     }
0351 
0352     if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
0353         return 0;
0354 
0355     mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
0356 
0357     cmd.action = mvm->queue_info[queue].tid_bitmap ?
0358         SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
0359     if (cmd.action == SCD_CFG_DISABLE_QUEUE)
0360         mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
0361 
0362     IWL_DEBUG_TX_QUEUES(mvm,
0363                 "Disabling TXQ #%d tids=0x%x\n",
0364                 queue,
0365                 mvm->queue_info[queue].tid_bitmap);
0366 
0367     /* If the queue is still enabled - nothing left to do in this func */
0368     if (cmd.action == SCD_CFG_ENABLE_QUEUE)
0369         return 0;
0370 
0371     cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
0372     cmd.tid = mvm->queue_info[queue].txq_tid;
0373 
0374     /* Make sure queue info is correct even though we overwrite it */
0375     WARN(mvm->queue_info[queue].tid_bitmap,
0376          "TXQ #%d info out-of-sync - tids=0x%x\n",
0377          queue, mvm->queue_info[queue].tid_bitmap);
0378 
0379     /* If we are here - the queue is freed and we can zero out these vals */
0380     mvm->queue_info[queue].tid_bitmap = 0;
0381 
0382     if (sta) {
0383         struct iwl_mvm_txq *mvmtxq =
0384             iwl_mvm_txq_from_tid(sta, tid);
0385 
0386         mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
0387     }
0388 
0389     /* Regardless of whether this TXQ was reserved for a STA - mark it as not reserved */
0390     mvm->queue_info[queue].reserved = false;
0391 
0392     iwl_trans_txq_disable(mvm->trans, queue, false);
0393     ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
0394                    sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
0395 
0396     if (ret)
0397         IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
0398             queue, ret);
0399     return ret;
0400 }
0401 
0402 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
0403 {
0404     struct ieee80211_sta *sta;
0405     struct iwl_mvm_sta *mvmsta;
0406     unsigned long tid_bitmap;
0407     unsigned long agg_tids = 0;
0408     u8 sta_id;
0409     int tid;
0410 
0411     lockdep_assert_held(&mvm->mutex);
0412 
0413     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0414         return -EINVAL;
0415 
0416     sta_id = mvm->queue_info[queue].ra_sta_id;
0417     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
0418 
0419     sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
0420                     lockdep_is_held(&mvm->mutex));
0421 
0422     if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
0423         return -EINVAL;
0424 
0425     mvmsta = iwl_mvm_sta_from_mac80211(sta);
0426 
0427     spin_lock_bh(&mvmsta->lock);
0428     for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
0429         if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
0430             agg_tids |= BIT(tid);
0431     }
0432     spin_unlock_bh(&mvmsta->lock);
0433 
0434     return agg_tids;
0435 }
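/*
 * Illustrative pairing (this is how iwl_mvm_sta_alloc_queue() below
 * uses it): the returned bitmap feeds straight into queue invalidation
 * so open aggregation sessions are stopped before the queue is shared:
 *
 *	disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
 *	if (disable_agg_tids)
 *		iwl_mvm_invalidate_sta_queue(mvm, queue,
 *					     disable_agg_tids, false);
 */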
0436 
0437 /*
0438  * Remove a queue from a station's resources.
0439  * Note that this only marks the queue as free. It DOESN'T delete a BA
0440  * agreement, and doesn't disable the queue.
0441  */
0442 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
0443 {
0444     struct ieee80211_sta *sta;
0445     struct iwl_mvm_sta *mvmsta;
0446     unsigned long tid_bitmap;
0447     unsigned long disable_agg_tids = 0;
0448     u8 sta_id;
0449     int tid;
0450 
0451     lockdep_assert_held(&mvm->mutex);
0452 
0453     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0454         return -EINVAL;
0455 
0456     sta_id = mvm->queue_info[queue].ra_sta_id;
0457     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
0458 
0459     rcu_read_lock();
0460 
0461     sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
0462 
0463     if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
0464         rcu_read_unlock();
0465         return 0;
0466     }
0467 
0468     mvmsta = iwl_mvm_sta_from_mac80211(sta);
0469 
0470     spin_lock_bh(&mvmsta->lock);
0471     /* Unmap MAC queues and TIDs from this queue */
0472     for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
0473         struct iwl_mvm_txq *mvmtxq =
0474             iwl_mvm_txq_from_tid(sta, tid);
0475 
0476         if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
0477             disable_agg_tids |= BIT(tid);
0478         mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
0479 
0480         mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
0481     }
0482 
0483     mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
0484     spin_unlock_bh(&mvmsta->lock);
0485 
0486     rcu_read_unlock();
0487 
0488     /*
0489      * The TX path may have been using this TXQ_ID from the tid_data,
0490      * so make sure it's no longer running so that we can safely reuse
0491      * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
0492      * above, but nothing guarantees we've stopped using them. Thus,
0493      * without this, we could get to iwl_mvm_disable_txq() and remove
0494      * the queue while still sending frames to it.
0495      */
0496     synchronize_net();
0497 
0498     return disable_agg_tids;
0499 }
0500 
0501 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
0502                        struct ieee80211_sta *old_sta,
0503                        u8 new_sta_id)
0504 {
0505     struct iwl_mvm_sta *mvmsta;
0506     u8 sta_id, tid;
0507     unsigned long disable_agg_tids = 0;
0508     bool same_sta;
0509     u16 queue_tmp = queue;
0510     int ret;
0511 
0512     lockdep_assert_held(&mvm->mutex);
0513 
0514     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0515         return -EINVAL;
0516 
0517     sta_id = mvm->queue_info[queue].ra_sta_id;
0518     tid = mvm->queue_info[queue].txq_tid;
0519 
0520     same_sta = sta_id == new_sta_id;
0521 
0522     mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
0523     if (WARN_ON(!mvmsta))
0524         return -EINVAL;
0525 
0526     disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
0527     /* Disable the queue */
0528     if (disable_agg_tids)
0529         iwl_mvm_invalidate_sta_queue(mvm, queue,
0530                          disable_agg_tids, false);
0531 
0532     ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
0533     if (ret) {
0534         IWL_ERR(mvm,
0535             "Failed to free inactive queue %d (ret=%d)\n",
0536             queue, ret);
0537 
0538         return ret;
0539     }
0540 
0541     /* If TXQ is allocated to another STA, update removal in FW */
0542     if (!same_sta)
0543         iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
0544 
0545     return 0;
0546 }
0547 
0548 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
0549                     unsigned long tfd_queue_mask, u8 ac)
0550 {
0551     int queue = 0;
0552     u8 ac_to_queue[IEEE80211_NUM_ACS];
0553     int i;
0554 
0555     /*
0556      * This protects us against grabbing a queue that's being reconfigured
0557      * by the inactivity checker.
0558      */
0559     lockdep_assert_held(&mvm->mutex);
0560 
0561     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0562         return -EINVAL;
0563 
0564     memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
0565 
0566     /* See what ACs the existing queues for this STA have */
0567     for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
0568         /* Only DATA queues can be shared */
0569         if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
0570             i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
0571             continue;
0572 
0573         ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
0574     }
0575 
0576     /*
0577      * The queue to share is chosen only from DATA queues as follows (in
0578      * descending priority):
0579      * 1. An AC_BE queue
0580      * 2. Same AC queue
0581      * 3. Highest AC queue that is lower than new AC
0582      * 4. Any existing AC (there always is at least 1 DATA queue)
0583      */
0584 
0585     /* Priority 1: An AC_BE queue */
0586     if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
0587         queue = ac_to_queue[IEEE80211_AC_BE];
0588     /* Priority 2: Same AC queue */
0589     else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
0590         queue = ac_to_queue[ac];
0591     /* Priority 3a: If new AC is VO and VI exists - use VI */
0592     else if (ac == IEEE80211_AC_VO &&
0593          ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
0594         queue = ac_to_queue[IEEE80211_AC_VI];
0595     /* Priority 3b: No BE so only AC less than the new one is BK */
0596     else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
0597         queue = ac_to_queue[IEEE80211_AC_BK];
0598     /* Priority 4a: No BE nor BK - use VI if exists */
0599     else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
0600         queue = ac_to_queue[IEEE80211_AC_VI];
0601     /* Priority 4b: No BE, BK nor VI - use VO if exists */
0602     else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
0603         queue = ac_to_queue[IEEE80211_AC_VO];
0604 
0605     /* Make sure queue found (or not) is legal */
0606     if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
0607         !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
0608         (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
0609         IWL_ERR(mvm, "No DATA queues available to share\n");
0610         return -ENOSPC;
0611     }
0612 
0613     return queue;
0614 }
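/*
 * Worked example of the priority order above: if the station's existing
 * DATA queues map only to AC_VI and AC_VO and a new AC_BK stream needs
 * a queue, priorities 1-3 all miss (no BE queue, no BK queue, and BK is
 * not VO), so priority 4a shares the VI queue.
 */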
0615 
0616 /* Re-configure the SCD for a queue that has already been configured */
0617 static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
0618                 int sta_id, int tid, int frame_limit, u16 ssn)
0619 {
0620     struct iwl_scd_txq_cfg_cmd cmd = {
0621         .scd_queue = queue,
0622         .action = SCD_CFG_ENABLE_QUEUE,
0623         .window = frame_limit,
0624         .sta_id = sta_id,
0625         .ssn = cpu_to_le16(ssn),
0626         .tx_fifo = fifo,
0627         .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
0628                   queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
0629         .tid = tid,
0630     };
0631     int ret;
0632 
0633     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0634         return -EINVAL;
0635 
0636     if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
0637          "Trying to reconfig unallocated queue %d\n", queue))
0638         return -ENXIO;
0639 
0640     IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
0641 
0642     ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
0643     WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
0644           queue, fifo, ret);
0645 
0646     return ret;
0647 }
0648 
0649 /*
0650  * If a given queue has a higher AC than the TID stream that is being compared
0651  * to, the queue needs to be redirected to the lower AC. This function does that
0652  * in such a case; otherwise - if no redirection is required - it does nothing,
0653  * unless the %force param is true.
0654  */
0655 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
0656                   int ac, int ssn, unsigned int wdg_timeout,
0657                   bool force, struct iwl_mvm_txq *txq)
0658 {
0659     struct iwl_scd_txq_cfg_cmd cmd = {
0660         .scd_queue = queue,
0661         .action = SCD_CFG_DISABLE_QUEUE,
0662     };
0663     bool shared_queue;
0664     int ret;
0665 
0666     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0667         return -EINVAL;
0668 
0669     /*
0670      * If the AC is lower than current one - FIFO needs to be redirected to
0671      * the lowest one of the streams in the queue. Check if this is needed
0672      * here.
0673      * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
0674      * value 3 and VO with value 0, so to check if ac X is lower than ac Y
0675      * we need to check if the numerical value of X is LARGER than of Y.
0676      */
0677     if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
0678         IWL_DEBUG_TX_QUEUES(mvm,
0679                     "No redirection needed on TXQ #%d\n",
0680                     queue);
0681         return 0;
0682     }
0683 
0684     cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
0685     cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
0686     cmd.tid = mvm->queue_info[queue].txq_tid;
0687     shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
0688 
0689     IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
0690                 queue, iwl_mvm_ac_to_tx_fifo[ac]);
0691 
0692     /* Stop the queue and wait for it to empty */
0693     txq->stopped = true;
0694 
0695     ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
0696     if (ret) {
0697         IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
0698             queue);
0699         ret = -EIO;
0700         goto out;
0701     }
0702 
0703     /* Before redirecting the queue we need to de-activate it */
0704     iwl_trans_txq_disable(mvm->trans, queue, false);
0705     ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
0706     if (ret)
0707         IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
0708             ret);
0709 
0710     /* Make sure the SCD wrptr is correctly set before reconfiguring */
0711     iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
0712 
0713     /* Update the TID "owner" of the queue */
0714     mvm->queue_info[queue].txq_tid = tid;
0715 
0716     /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
0717 
0718     /* Redirect to lower AC */
0719     iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
0720                  cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
0721 
0722     /* Update AC marking of the queue */
0723     mvm->queue_info[queue].mac80211_ac = ac;
0724 
0725     /*
0726      * Mark the queue as shared in the transport if shared.
0727      * Note this has to be done after queue enablement because enablement
0728      * can also set this value, and there is no indication there of shared
0729      * queues.
0730      */
0731     if (shared_queue)
0732         iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
0733 
0734 out:
0735     /* Continue using the queue */
0736     txq->stopped = false;
0737 
0738     return ret;
0739 }
0740 
0741 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
0742                    u8 minq, u8 maxq)
0743 {
0744     int i;
0745 
0746     lockdep_assert_held(&mvm->mutex);
0747 
0748     if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
0749          "max queue %d >= num_of_queues (%d)", maxq,
0750          mvm->trans->trans_cfg->base_params->num_of_queues))
0751         maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
0752 
0753     /* This should not be hit with new TX path */
0754     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0755         return -ENOSPC;
0756 
0757     /* Start by looking for a free queue */
0758     for (i = minq; i <= maxq; i++)
0759         if (mvm->queue_info[i].tid_bitmap == 0 &&
0760             mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
0761             return i;
0762 
0763     return -ENOSPC;
0764 }
0765 
0766 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
0767                    u8 sta_id, u8 tid, unsigned int timeout)
0768 {
0769     int queue, size;
0770 
0771     if (tid == IWL_MAX_TID_COUNT) {
0772         tid = IWL_MGMT_TID;
0773         size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
0774                  mvm->trans->cfg->min_txq_size);
0775     } else {
0776         struct ieee80211_sta *sta;
0777 
0778         rcu_read_lock();
0779         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
0780 
0781         /* this queue isn't used for traffic (cab_queue) */
0782         if (IS_ERR_OR_NULL(sta)) {
0783             size = IWL_MGMT_QUEUE_SIZE;
0784         } else if (sta->deflink.he_cap.has_he) {
0785             /* support for a 256-frame BA window */
0786             size = IWL_DEFAULT_QUEUE_SIZE_HE;
0787         } else {
0788             size = IWL_DEFAULT_QUEUE_SIZE;
0789         }
0790 
0791         rcu_read_unlock();
0792     }
0793 
0794     /* take the min with bc tbl entries allowed */
0795     size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
0796 
0797     /* size needs to be a power of 2 for calculating read/write pointers */
0798     size = rounddown_pow_of_two(size);
0799 
0800     do {
0801         queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
0802                         tid, size, timeout);
0803 
0804         if (queue < 0)
0805             IWL_DEBUG_TX_QUEUES(mvm,
0806                         "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
0807                         size, sta_id, tid, queue);
0808         size /= 2;
0809     } while (queue < 0 && size >= 16);
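    /*
     * Note: the allocation above is retried with the size halved on
     * each failure (e.g. 512 -> 256 -> 128 ...), stopping once it
     * succeeds or the size would drop below 16 entries;
     * rounddown_pow_of_two() keeps every attempt a power of two.
     */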
0810 
0811     if (queue < 0)
0812         return queue;
0813 
0814     IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
0815                 queue, sta_id, tid);
0816 
0817     return queue;
0818 }
0819 
0820 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
0821                     struct ieee80211_sta *sta, u8 ac,
0822                     int tid)
0823 {
0824     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
0825     struct iwl_mvm_txq *mvmtxq =
0826         iwl_mvm_txq_from_tid(sta, tid);
0827     unsigned int wdg_timeout =
0828         iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
0829     int queue = -1;
0830 
0831     lockdep_assert_held(&mvm->mutex);
0832 
0833     IWL_DEBUG_TX_QUEUES(mvm,
0834                 "Allocating queue for sta %d on tid %d\n",
0835                 mvmsta->sta_id, tid);
0836     queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
0837     if (queue < 0)
0838         return queue;
0839 
0840     mvmtxq->txq_id = queue;
0841     mvm->tvqm_info[queue].txq_tid = tid;
0842     mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
0843 
0844     IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
0845 
0846     spin_lock_bh(&mvmsta->lock);
0847     mvmsta->tid_data[tid].txq_id = queue;
0848     spin_unlock_bh(&mvmsta->lock);
0849 
0850     return 0;
0851 }
0852 
0853 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
0854                        struct ieee80211_sta *sta,
0855                        int queue, u8 sta_id, u8 tid)
0856 {
0857     bool enable_queue = true;
0858 
0859     /* Make sure this TID isn't already enabled */
0860     if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
0861         IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
0862             queue, tid);
0863         return false;
0864     }
0865 
0866     /* Update mappings and refcounts */
0867     if (mvm->queue_info[queue].tid_bitmap)
0868         enable_queue = false;
0869 
0870     mvm->queue_info[queue].tid_bitmap |= BIT(tid);
0871     mvm->queue_info[queue].ra_sta_id = sta_id;
0872 
0873     if (enable_queue) {
0874         if (tid != IWL_MAX_TID_COUNT)
0875             mvm->queue_info[queue].mac80211_ac =
0876                 tid_to_mac80211_ac[tid];
0877         else
0878             mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
0879 
0880         mvm->queue_info[queue].txq_tid = tid;
0881     }
0882 
0883     if (sta) {
0884         struct iwl_mvm_txq *mvmtxq =
0885             iwl_mvm_txq_from_tid(sta, tid);
0886 
0887         mvmtxq->txq_id = queue;
0888     }
0889 
0890     IWL_DEBUG_TX_QUEUES(mvm,
0891                 "Enabling TXQ #%d tids=0x%x\n",
0892                 queue, mvm->queue_info[queue].tid_bitmap);
0893 
0894     return enable_queue;
0895 }
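/*
 * Note on the return value: %true means the queue went from unused to
 * in-use, i.e. the caller must actually enable it in hardware; %false
 * means the TID was merely added to an already-running (shared) queue,
 * or the mapping was rejected.
 */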
0896 
0897 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
0898                    int queue, u16 ssn,
0899                    const struct iwl_trans_txq_scd_cfg *cfg,
0900                    unsigned int wdg_timeout)
0901 {
0902     struct iwl_scd_txq_cfg_cmd cmd = {
0903         .scd_queue = queue,
0904         .action = SCD_CFG_ENABLE_QUEUE,
0905         .window = cfg->frame_limit,
0906         .sta_id = cfg->sta_id,
0907         .ssn = cpu_to_le16(ssn),
0908         .tx_fifo = cfg->fifo,
0909         .aggregate = cfg->aggregate,
0910         .tid = cfg->tid,
0911     };
0912     bool inc_ssn;
0913 
0914     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0915         return false;
0916 
0917     /* Send the enabling command if we need to */
0918     if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
0919         return false;
0920 
0921     inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
0922                        NULL, wdg_timeout);
0923     if (inc_ssn)
0924         le16_add_cpu(&cmd.ssn, 1);
0925 
0926     WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
0927          "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
0928 
0929     return inc_ssn;
0930 }
0931 
0932 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
0933 {
0934     struct iwl_scd_txq_cfg_cmd cmd = {
0935         .scd_queue = queue,
0936         .action = SCD_CFG_UPDATE_QUEUE_TID,
0937     };
0938     int tid;
0939     unsigned long tid_bitmap;
0940     int ret;
0941 
0942     lockdep_assert_held(&mvm->mutex);
0943 
0944     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0945         return;
0946 
0947     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
0948 
0949     if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
0950         return;
0951 
0952     /* Find any TID for queue */
0953     tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
0954     cmd.tid = tid;
0955     cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
0956 
0957     ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
0958     if (ret) {
0959         IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
0960             queue, ret);
0961         return;
0962     }
0963 
0964     mvm->queue_info[queue].txq_tid = tid;
0965     IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
0966                 queue, tid);
0967 }
0968 
0969 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
0970 {
0971     struct ieee80211_sta *sta;
0972     struct iwl_mvm_sta *mvmsta;
0973     u8 sta_id;
0974     int tid = -1;
0975     unsigned long tid_bitmap;
0976     unsigned int wdg_timeout;
0977     int ssn;
0978     int ret = true;
0979 
0980     /* queue sharing is disabled on new TX path */
0981     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
0982         return;
0983 
0984     lockdep_assert_held(&mvm->mutex);
0985 
0986     sta_id = mvm->queue_info[queue].ra_sta_id;
0987     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
0988 
0989     /* Find TID for queue, and make sure it is the only one on the queue */
0990     tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
0991     if (tid_bitmap != BIT(tid)) {
0992         IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
0993             queue, tid_bitmap);
0994         return;
0995     }
0996 
0997     IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
0998                 tid);
0999 
1000     sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1001                     lockdep_is_held(&mvm->mutex));
1002 
1003     if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1004         return;
1005 
1006     mvmsta = iwl_mvm_sta_from_mac80211(sta);
1007     wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1008 
1009     ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1010 
1011     ret = iwl_mvm_redirect_queue(mvm, queue, tid,
1012                      tid_to_mac80211_ac[tid], ssn,
1013                      wdg_timeout, true,
1014                      iwl_mvm_txq_from_tid(sta, tid));
1015     if (ret) {
1016         IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1017         return;
1018     }
1019 
1020     /* If aggs should be turned back on - do it */
1021     if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1022         struct iwl_mvm_add_sta_cmd cmd = {0};
1023 
1024         mvmsta->tid_disable_agg &= ~BIT(tid);
1025 
1026         cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1027         cmd.sta_id = mvmsta->sta_id;
1028         cmd.add_modify = STA_MODE_MODIFY;
1029         cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1030         cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1031         cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1032 
1033         ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1034                        iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1035         if (!ret) {
1036             IWL_DEBUG_TX_QUEUES(mvm,
1037                         "TXQ #%d is now aggregated again\n",
1038                         queue);
1039 
1040             /* Mark queue internally as aggregating again */
1041             iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1042         }
1043     }
1044 
1045     mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1046 }
1047 
1048 /*
1049  * Remove inactive TIDs of a given queue.
1050  * If all queue TIDs are inactive - mark the queue as inactive.
1051  * If only some of the queue TIDs are inactive - unmap them from the queue.
1052  *
1053  * Returns %true if all TIDs were removed and the queue could be reused.
1054  */
1055 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1056                      struct iwl_mvm_sta *mvmsta, int queue,
1057                      unsigned long tid_bitmap,
1058                      unsigned long *unshare_queues,
1059                      unsigned long *changetid_queues)
1060 {
1061     unsigned int tid;
1062 
1063     lockdep_assert_held(&mvmsta->lock);
1064     lockdep_assert_held(&mvm->mutex);
1065 
1066     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1067         return false;
1068 
1069     /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1070     for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1071         /* If some TFDs are still queued - don't mark TID as inactive */
1072         if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1073             tid_bitmap &= ~BIT(tid);
1074 
1075         /* Don't mark as inactive any TID that has an active BA */
1076         if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1077             tid_bitmap &= ~BIT(tid);
1078     }
1079 
1080     /* If all TIDs in the queue are inactive - return that it can be reused */
1081     if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1082         IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1083         return true;
1084     }
1085 
1086     /*
1087      * If we are here, this is a shared queue and not all TIDs timed-out.
1088      * Remove the ones that did.
1089      */
1090     for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1091         u16 q_tid_bitmap;
1092 
1093         mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1094         mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1095 
1096         q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1097 
1098         /*
1099          * We need to take into account a situation in which a TXQ was
1100          * allocated to TID x, and then turned shared by adding TIDs y
1101          * and z. If TID x becomes inactive and is removed from the TXQ,
1102          * ownership must be given to one of the remaining TIDs.
1103          * This is mainly because if TID x continues - a new queue can't
1104          * be allocated for it as long as it is an owner of another TXQ.
1105          *
1106          * Mark this queue in the right bitmap, we'll send the command
1107          * to the firmware later.
1108          */
1109         if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1110             set_bit(queue, changetid_queues);
1111 
1112         IWL_DEBUG_TX_QUEUES(mvm,
1113                     "Removing inactive TID %d from shared Q:%d\n",
1114                     tid, queue);
1115     }
1116 
1117     IWL_DEBUG_TX_QUEUES(mvm,
1118                 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1119                 mvm->queue_info[queue].tid_bitmap);
1120 
1121     /*
1122      * There may be different TIDs with the same mac queues, so make
1123      * sure all TIDs have existing corresponding mac queues enabled
1124      */
1125     tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1126 
1127     /* If the queue is marked as shared - "unshare" it */
1128     if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1129         mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1130         IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1131                     queue);
1132         set_bit(queue, unshare_queues);
1133     }
1134 
1135     return false;
1136 }
1137 
1138 /*
1139  * Check for inactivity - this includes checking if any queue
1140  * can be unshared and finding one (and only one) that can be
1141  * reused.
1142  * This function is also invoked as a sort of clean-up task,
1143  * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1144  *
1145  * Returns the queue number, or -ENOSPC.
1146  */
1147 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1148 {
1149     unsigned long now = jiffies;
1150     unsigned long unshare_queues = 0;
1151     unsigned long changetid_queues = 0;
1152     int i, ret, free_queue = -ENOSPC;
1153     struct ieee80211_sta *queue_owner  = NULL;
1154 
1155     lockdep_assert_held(&mvm->mutex);
1156 
1157     if (iwl_mvm_has_new_tx_api(mvm))
1158         return -ENOSPC;
1159 
1160     rcu_read_lock();
1161 
1162     /* we skip the CMD queue below by starting at 1 */
1163     BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1164 
1165     for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1166         struct ieee80211_sta *sta;
1167         struct iwl_mvm_sta *mvmsta;
1168         u8 sta_id;
1169         int tid;
1170         unsigned long inactive_tid_bitmap = 0;
1171         unsigned long queue_tid_bitmap;
1172 
1173         queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1174         if (!queue_tid_bitmap)
1175             continue;
1176 
1177         /* If TXQ isn't in active use anyway - nothing to do here... */
1178         if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1179             mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1180             continue;
1181 
1182         /* Check to see if there are inactive TIDs on this queue */
1183         for_each_set_bit(tid, &queue_tid_bitmap,
1184                  IWL_MAX_TID_COUNT + 1) {
1185             if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1186                        IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1187                 continue;
1188 
1189             inactive_tid_bitmap |= BIT(tid);
1190         }
1191 
1192         /* If all TIDs are active - finish check on this queue */
1193         if (!inactive_tid_bitmap)
1194             continue;
1195 
1196         /*
1197          * If we are here - the queue hadn't been served recently and is
1198          * in use
1199          */
1200 
1201         sta_id = mvm->queue_info[i].ra_sta_id;
1202         sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1203 
1204         /*
1205          * If the STA doesn't exist anymore, it isn't an error. It could
1206          * be that it was removed since getting the queues, and in this
1207          * case it should've inactivated its queues anyway.
1208          */
1209         if (IS_ERR_OR_NULL(sta))
1210             continue;
1211 
1212         mvmsta = iwl_mvm_sta_from_mac80211(sta);
1213 
1214         spin_lock_bh(&mvmsta->lock);
1215         ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1216                            inactive_tid_bitmap,
1217                            &unshare_queues,
1218                            &changetid_queues);
1219         if (ret && free_queue < 0) {
1220             queue_owner = sta;
1221             free_queue = i;
1222         }
1223         /* only unlock sta lock - we still need the queue info lock */
1224         spin_unlock_bh(&mvmsta->lock);
1225     }
1226 
1227 
1228     /* Reconfigure queues requiring reconfiguration */
1229     for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1230         iwl_mvm_unshare_queue(mvm, i);
1231     for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1232         iwl_mvm_change_queue_tid(mvm, i);
1233 
1234     rcu_read_unlock();
1235 
1236     if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1237         ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1238                           alloc_for_sta);
1239         if (ret)
1240             return ret;
1241     }
1242 
1243     return free_queue;
1244 }
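/*
 * Usage note - both patterns appear below: for a pure clean-up pass the
 * caller passes IWL_MVM_INVALID_STA and ignores the return value; when
 * allocating for a station, a non-negative return is a queue that has
 * already been freed up for that station:
 *
 *	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
 *
 *	queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
 */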
1245 
1246 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1247                    struct ieee80211_sta *sta, u8 ac, int tid)
1248 {
1249     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1250     struct iwl_trans_txq_scd_cfg cfg = {
1251         .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1252         .sta_id = mvmsta->sta_id,
1253         .tid = tid,
1254         .frame_limit = IWL_FRAME_LIMIT,
1255     };
1256     unsigned int wdg_timeout =
1257         iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1258     int queue = -1;
1259     u16 queue_tmp;
1260     unsigned long disable_agg_tids = 0;
1261     enum iwl_mvm_agg_state queue_state;
1262     bool shared_queue = false, inc_ssn;
1263     int ssn;
1264     unsigned long tfd_queue_mask;
1265     int ret;
1266 
1267     lockdep_assert_held(&mvm->mutex);
1268 
1269     if (iwl_mvm_has_new_tx_api(mvm))
1270         return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1271 
1272     spin_lock_bh(&mvmsta->lock);
1273     tfd_queue_mask = mvmsta->tfd_queue_msk;
1274     ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1275     spin_unlock_bh(&mvmsta->lock);
1276 
1277     if (tid == IWL_MAX_TID_COUNT) {
1278         queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1279                         IWL_MVM_DQA_MIN_MGMT_QUEUE,
1280                         IWL_MVM_DQA_MAX_MGMT_QUEUE);
1281         if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1282             IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1283                         queue);
1284 
1285         /* If no such queue is found, we'll use a DATA queue instead */
1286     }
1287 
1288     if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1289         (mvm->queue_info[mvmsta->reserved_queue].status ==
1290             IWL_MVM_QUEUE_RESERVED)) {
1291         queue = mvmsta->reserved_queue;
1292         mvm->queue_info[queue].reserved = true;
1293         IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1294     }
1295 
1296     if (queue < 0)
1297         queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1298                         IWL_MVM_DQA_MIN_DATA_QUEUE,
1299                         IWL_MVM_DQA_MAX_DATA_QUEUE);
1300     if (queue < 0) {
1301         /* try harder - perhaps kill an inactive queue */
1302         queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1303     }
1304 
1305     /* No free queue - we'll have to share */
1306     if (queue <= 0) {
1307         queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1308         if (queue > 0) {
1309             shared_queue = true;
1310             mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1311         }
1312     }
1313 
1314     /*
1315      * Mark TXQ as ready, even though it hasn't been fully configured yet,
1316      * to make sure no one else takes it.
1317      * This will allow avoiding re-acquiring the lock at the end of the
1318      * configuration. On error we'll mark it back as free.
1319      */
1320     if (queue > 0 && !shared_queue)
1321         mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1322 
1323     /* This shouldn't happen - out of queues */
1324     if (WARN_ON(queue <= 0)) {
1325         IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1326             tid, cfg.sta_id);
1327         return queue;
1328     }
1329 
1330     /*
1331      * Actual en/disablement of aggregations is through the ADD_STA HCMD,
1332      * but for configuring the SCD to send A-MPDUs we need to mark the queue
1333      * as aggregatable.
1334      * Mark all DATA queues as allowing to be aggregated at some point
1335      */
1336     cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1337              queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1338 
1339     IWL_DEBUG_TX_QUEUES(mvm,
1340                 "Allocating %squeue #%d to sta %d on tid %d\n",
1341                 shared_queue ? "shared " : "", queue,
1342                 mvmsta->sta_id, tid);
1343 
1344     if (shared_queue) {
1345         /* Disable any open aggs on this queue */
1346         disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1347 
1348         if (disable_agg_tids) {
1349             IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1350                         queue);
1351             iwl_mvm_invalidate_sta_queue(mvm, queue,
1352                              disable_agg_tids, false);
1353         }
1354     }
1355 
1356     inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1357 
1358     /*
1359      * Mark the queue as shared in the transport if shared.
1360      * Note this has to be done after queue enablement because enablement
1361      * can also set this value, and there is no indication there of shared
1362      * queues.
1363      */
1364     if (shared_queue)
1365         iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1366 
1367     spin_lock_bh(&mvmsta->lock);
1368     /*
1369      * This looks racy, but it is not. We have only one packet for
1370      * this ra/tid in our Tx path since we stop the Qdisc when we
1371      * need to allocate a new TFD queue.
1372      */
1373     if (inc_ssn) {
1374         mvmsta->tid_data[tid].seq_number += 0x10;
1375         ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1376     }
1377     mvmsta->tid_data[tid].txq_id = queue;
1378     mvmsta->tfd_queue_msk |= BIT(queue);
1379     queue_state = mvmsta->tid_data[tid].state;
1380 
1381     if (mvmsta->reserved_queue == queue)
1382         mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1383     spin_unlock_bh(&mvmsta->lock);
1384 
1385     if (!shared_queue) {
1386         ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1387         if (ret)
1388             goto out_err;
1389 
1390         /* If we need to re-enable aggregations... */
1391         if (queue_state == IWL_AGG_ON) {
1392             ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1393             if (ret)
1394                 goto out_err;
1395         }
1396     } else {
1397         /* Redirect queue, if needed */
1398         ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1399                          wdg_timeout, false,
1400                          iwl_mvm_txq_from_tid(sta, tid));
1401         if (ret)
1402             goto out_err;
1403     }
1404 
1405     return 0;
1406 
1407 out_err:
1408     queue_tmp = queue;
1409     iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);
1410 
1411     return ret;
1412 }
1413 
1414 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1415 {
1416     struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1417                        add_stream_wk);
1418 
1419     mutex_lock(&mvm->mutex);
1420 
1421     iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1422 
1423     while (!list_empty(&mvm->add_stream_txqs)) {
1424         struct iwl_mvm_txq *mvmtxq;
1425         struct ieee80211_txq *txq;
1426         u8 tid;
1427 
1428         mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1429                       struct iwl_mvm_txq, list);
1430 
1431         txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1432                    drv_priv);
1433         tid = txq->tid;
1434         if (tid == IEEE80211_NUM_TIDS)
1435             tid = IWL_MAX_TID_COUNT;
1436 
1437         /*
1438          * We can't really do much here, but if this fails we can't
1439          * transmit anyway - so just don't transmit the frame etc.
1440          * and let them back up ... we've tried our best to allocate
1441          * a queue in the function itself.
1442          */
1443         if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1444             list_del_init(&mvmtxq->list);
1445             continue;
1446         }
1447 
1448         list_del_init(&mvmtxq->list);
1449         local_bh_disable();
1450         iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1451         local_bh_enable();
1452     }
1453 
1454     mutex_unlock(&mvm->mutex);
1455 }
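/*
 * A sketch of the producer side (hedged - the actual hook lives in the
 * mac80211 ops, outside this section): when a TXQ has no hardware queue
 * yet, it is queued on mvm->add_stream_txqs and this worker is kicked:
 *
 *	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
 *	schedule_work(&mvm->add_stream_wk);
 */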
1456 
1457 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1458                       struct ieee80211_sta *sta,
1459                       enum nl80211_iftype vif_type)
1460 {
1461     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1462     int queue;
1463 
1464     /* queue reserving is disabled on new TX path */
1465     if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1466         return 0;
1467 
1468     /* run the general cleanup/unsharing of queues */
1469     iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1470 
1471     /* Make sure we have free resources for this STA */
1472     if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1473         !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1474         (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1475          IWL_MVM_QUEUE_FREE))
1476         queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1477     else
1478         queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1479                         IWL_MVM_DQA_MIN_DATA_QUEUE,
1480                         IWL_MVM_DQA_MAX_DATA_QUEUE);
1481     if (queue < 0) {
1482         /* try again - this time kick out a queue if needed */
1483         queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1484         if (queue < 0) {
1485             IWL_ERR(mvm, "No available queues for new station\n");
1486             return -ENOSPC;
1487         }
1488     }
1489     mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1490 
1491     mvmsta->reserved_queue = queue;
1492 
1493     IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1494                 queue, mvmsta->sta_id);
1495 
1496     return 0;
1497 }
1498 
1499 /*
1500  * In DQA mode, after a HW restart the queues should be allocated as before, in
1501  * order to avoid race conditions when there are shared queues. This function
1502  * does the re-mapping and queue allocation.
1503  *
1504  * Note that re-enabling aggregations isn't done in this function.
1505  */
1506 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1507                          struct ieee80211_sta *sta)
1508 {
1509     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1510     unsigned int wdg =
1511         iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1512     int i;
1513     struct iwl_trans_txq_scd_cfg cfg = {
1514         .sta_id = mvm_sta->sta_id,
1515         .frame_limit = IWL_FRAME_LIMIT,
1516     };
1517 
1518     /* Make sure reserved queue is still marked as such (if allocated) */
1519     if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1520         mvm->queue_info[mvm_sta->reserved_queue].status =
1521             IWL_MVM_QUEUE_RESERVED;
1522 
1523     for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1524         struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1525         int txq_id = tid_data->txq_id;
1526         int ac;
1527 
1528         if (txq_id == IWL_MVM_INVALID_QUEUE)
1529             continue;
1530 
1531         ac = tid_to_mac80211_ac[i];
1532 
1533         if (iwl_mvm_has_new_tx_api(mvm)) {
1534             IWL_DEBUG_TX_QUEUES(mvm,
1535                         "Re-mapping sta %d tid %d\n",
1536                         mvm_sta->sta_id, i);
1537             txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1538                              i, wdg);
1539             /*
1540              * On failure, just set it to IWL_MVM_INVALID_QUEUE so we
1541              * try again later; there is no other good way of failing
1542              * here.
1543              */
1544             if (txq_id < 0)
1545                 txq_id = IWL_MVM_INVALID_QUEUE;
1546             tid_data->txq_id = txq_id;
1547 
1548             /*
1549              * Since we don't restore the seq number after reset and
1550              * the HW sets it now, an FW reset will cause the seq num
1551              * to start at 0 again; the driver must update its internal
1552              * copy as well so it stays in sync with the real value.
1553              */
1554             tid_data->seq_number = 0;
1555         } else {
1556             u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1557 
1558             cfg.tid = i;
1559             cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1560             cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1561                      txq_id ==
1562                      IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1563 
1564             IWL_DEBUG_TX_QUEUES(mvm,
1565                         "Re-mapping sta %d tid %d to queue %d\n",
1566                         mvm_sta->sta_id, i, txq_id);
1567 
1568             iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1569             mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1570         }
1571     }
1572 }
1573 
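     /*
      * Send ADD_STA for an internal (driver-managed) station such as the
      * aux, sniffer, broadcast or multicast station. TX is left disabled
      * on all TIDs (tid_disable_tx = 0xffff) until modified later.
      */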
1574 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1575                       struct iwl_mvm_int_sta *sta,
1576                       const u8 *addr,
1577                       u16 mac_id, u16 color)
1578 {
1579     struct iwl_mvm_add_sta_cmd cmd;
1580     int ret;
1581     u32 status = ADD_STA_SUCCESS;
1582 
1583     lockdep_assert_held(&mvm->mutex);
1584 
1585     memset(&cmd, 0, sizeof(cmd));
1586     cmd.sta_id = sta->sta_id;
1587 
1588     if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
1589         sta->type == IWL_STA_AUX_ACTIVITY)
1590         cmd.mac_id_n_color = cpu_to_le32(mac_id);
1591     else
1592         cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1593                                      color));
1594 
1595     if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1596         cmd.station_type = sta->type;
1597 
1598     if (!iwl_mvm_has_new_tx_api(mvm))
1599         cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1600     cmd.tid_disable_tx = cpu_to_le16(0xffff);
1601 
1602     if (addr)
1603         memcpy(cmd.addr, addr, ETH_ALEN);
1604 
1605     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1606                       iwl_mvm_add_sta_cmd_size(mvm),
1607                       &cmd, &status);
1608     if (ret)
1609         return ret;
1610 
1611     switch (status & IWL_ADD_STA_STATUS_MASK) {
1612     case ADD_STA_SUCCESS:
1613         IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1614         return 0;
1615     default:
1616         ret = -EIO;
1617         IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1618             status);
1619         break;
1620     }
1621     return ret;
1622 }
1623 
1624 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1625             struct ieee80211_vif *vif,
1626             struct ieee80211_sta *sta)
1627 {
1628     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1629     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1630     struct iwl_mvm_rxq_dup_data *dup_data;
1631     int i, ret, sta_id;
1632     bool sta_update = false;
1633     unsigned int sta_flags = 0;
1634 
1635     lockdep_assert_held(&mvm->mutex);
1636 
1637     if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1638         sta_id = iwl_mvm_find_free_sta_id(mvm,
1639                           ieee80211_vif_type_p2p(vif));
1640     else
1641         sta_id = mvm_sta->sta_id;
1642 
1643     if (sta_id == IWL_MVM_INVALID_STA)
1644         return -ENOSPC;
1645 
1646     spin_lock_init(&mvm_sta->lock);
1647 
1648     /* if this is a HW restart re-alloc existing queues */
1649     if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1650         struct iwl_mvm_int_sta tmp_sta = {
1651             .sta_id = sta_id,
1652             .type = mvm_sta->sta_type,
1653         };
1654 
1655         /*
1656          * First add an empty station since allocating
1657          * a queue requires a valid station
1658          */
1659         ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1660                          mvmvif->id, mvmvif->color);
1661         if (ret)
1662             goto err;
1663 
1664         iwl_mvm_realloc_queues_after_restart(mvm, sta);
1665         sta_update = true;
1666         sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1667         goto update_fw;
1668     }
1669 
1670     mvm_sta->sta_id = sta_id;
1671     mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1672                               mvmvif->color);
1673     mvm_sta->vif = vif;
1674     if (!mvm->trans->trans_cfg->gen2)
1675         mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1676     else
1677         mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1678     mvm_sta->tx_protection = 0;
1679     mvm_sta->tt_tx_protection = false;
1680     mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1681 
1682     /* HW restart, don't assume the memory has been zeroed */
1683     mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1684     mvm_sta->tfd_queue_msk = 0;
1685 
1686     /* for HW restart - reset everything but the sequence number */
1687     for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1688         u16 seq = mvm_sta->tid_data[i].seq_number;
1689         memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1690         mvm_sta->tid_data[i].seq_number = seq;
1691 
1692         /*
1693          * Mark all queues for this STA as unallocated and defer TX
1694          * frames until the queue is allocated
1695          */
1696         mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1697     }
1698 
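         /* Mark all mac80211 TXQs of this station as not yet mapped to a HW queue */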
1699     for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1700         struct iwl_mvm_txq *mvmtxq =
1701             iwl_mvm_txq_from_mac80211(sta->txq[i]);
1702 
1703         mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1704         INIT_LIST_HEAD(&mvmtxq->list);
1705         atomic_set(&mvmtxq->tx_request, 0);
1706     }
1707 
1708     mvm_sta->agg_tids = 0;
1709 
1710     if (iwl_mvm_has_new_rx_api(mvm) &&
1711         !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1712         int q;
1713 
1714         dup_data = kcalloc(mvm->trans->num_rx_queues,
1715                    sizeof(*dup_data), GFP_KERNEL);
1716         if (!dup_data)
1717             return -ENOMEM;
1718         /*
1719          * Initialize all the last_seq values to 0xffff which can never
1720          * compare equal to the frame's seq_ctrl in the check in
1721          * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1722          * number and fragmented packets don't reach that function.
1723          *
1724          * This thus allows receiving a packet with seqno 0 and the
1725          * retry bit set as the very first packet on a new TID.
1726          */
1727         for (q = 0; q < mvm->trans->num_rx_queues; q++)
1728             memset(dup_data[q].last_seq, 0xff,
1729                    sizeof(dup_data[q].last_seq));
1730         mvm_sta->dup_data = dup_data;
1731     }
1732 
1733     if (!iwl_mvm_has_new_tx_api(mvm)) {
1734         ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1735                          ieee80211_vif_type_p2p(vif));
1736         if (ret)
1737             goto err;
1738     }
1739 
1740     /*
1741      * If rs is registered with mac80211, "add station" will be handled via
1742      * the corresponding ops; otherwise we need to notify rate scaling here.
1743      */
1744     if (iwl_mvm_has_tlc_offload(mvm))
1745         iwl_mvm_rs_add_sta(mvm, mvm_sta);
1746     else
1747         spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1748 
1749     iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1750 
1751 update_fw:
1752     ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1753     if (ret)
1754         goto err;
1755 
1756     if (vif->type == NL80211_IFTYPE_STATION) {
1757         if (!sta->tdls) {
1758             WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1759             mvmvif->ap_sta_id = sta_id;
1760         } else {
1761             WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1762         }
1763     }
1764 
1765     rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1766 
1767     return 0;
1768 
1769 err:
1770     return ret;
1771 }
1772 
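     /*
      * Set or clear the DRAIN flow flag for this station in the firmware;
      * while set, frames still pending for the station are drained in the
      * FW instead of being transmitted (used around station removal).
      */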
1773 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1774               bool drain)
1775 {
1776     struct iwl_mvm_add_sta_cmd cmd = {};
1777     int ret;
1778     u32 status;
1779 
1780     lockdep_assert_held(&mvm->mutex);
1781 
1782     cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1783     cmd.sta_id = mvmsta->sta_id;
1784     cmd.add_modify = STA_MODE_MODIFY;
1785     cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1786     cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1787 
1788     status = ADD_STA_SUCCESS;
1789     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1790                       iwl_mvm_add_sta_cmd_size(mvm),
1791                       &cmd, &status);
1792     if (ret)
1793         return ret;
1794 
1795     switch (status & IWL_ADD_STA_STATUS_MASK) {
1796     case ADD_STA_SUCCESS:
1797         IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1798                    mvmsta->sta_id);
1799         break;
1800     default:
1801         ret = -EIO;
1802         IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1803             mvmsta->sta_id);
1804         break;
1805     }
1806 
1807     return ret;
1808 }
1809 
1810 /*
1811  * Remove a station from the FW table. Before sending the command to remove
1812  * the station, validate that the station is indeed known to the driver
1813  * (sanity check only).
1814  */
1815 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1816 {
1817     struct ieee80211_sta *sta;
1818     struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1819         .sta_id = sta_id,
1820     };
1821     int ret;
1822 
1823     sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1824                     lockdep_is_held(&mvm->mutex));
1825 
1826     /* Note: internal stations are marked as error values */
1827     if (!sta) {
1828         IWL_ERR(mvm, "Invalid station id\n");
1829         return -EINVAL;
1830     }
1831 
1832     ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1833                    sizeof(rm_sta_cmd), &rm_sta_cmd);
1834     if (ret) {
1835         IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1836         return ret;
1837     }
1838 
1839     return 0;
1840 }
1841 
1842 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1843                        struct ieee80211_vif *vif,
1844                        struct ieee80211_sta *sta)
1845 {
1846     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1847     int i;
1848 
1849     lockdep_assert_held(&mvm->mutex);
1850 
1851     for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1852         if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1853             continue;
1854 
1855         iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
1856         mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1857     }
1858 
1859     for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1860         struct iwl_mvm_txq *mvmtxq =
1861             iwl_mvm_txq_from_mac80211(sta->txq[i]);
1862 
1863         mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1864         list_del_init(&mvmtxq->list);
1865     }
1866 }
1867 
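     /* Wait for the transport to empty every TX queue still mapped to this station */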
1868 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1869                   struct iwl_mvm_sta *mvm_sta)
1870 {
1871     int i;
1872 
1873     for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1874         u16 txq_id;
1875         int ret;
1876 
1877         spin_lock_bh(&mvm_sta->lock);
1878         txq_id = mvm_sta->tid_data[i].txq_id;
1879         spin_unlock_bh(&mvm_sta->lock);
1880 
1881         if (txq_id == IWL_MVM_INVALID_QUEUE)
1882             continue;
1883 
1884         ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1885         if (ret)
1886             return ret;
1887     }
1888 
1889     return 0;
1890 }
1891 
1892 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1893            struct ieee80211_vif *vif,
1894            struct ieee80211_sta *sta)
1895 {
1896     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1897     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1898     u8 sta_id = mvm_sta->sta_id;
1899     int ret;
1900 
1901     lockdep_assert_held(&mvm->mutex);
1902 
1903     if (iwl_mvm_has_new_rx_api(mvm))
1904         kfree(mvm_sta->dup_data);
1905 
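         /*
          * Removal sequence: drain the station in the FW, flush and wait
          * for its TX queues to empty, clear the drain flag again, then
          * tear down the queues before sending REMOVE_STA.
          */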
1906     ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1907     if (ret)
1908         return ret;
1909 
1910     /* flush its queues here since we are freeing mvm_sta */
1911     ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
1912     if (ret)
1913         return ret;
1914     if (iwl_mvm_has_new_tx_api(mvm)) {
1915         ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1916     } else {
1917         u32 q_mask = mvm_sta->tfd_queue_msk;
1918 
1919         ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1920                              q_mask);
1921     }
1922     if (ret)
1923         return ret;
1924 
1925     ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1926 
1927     iwl_mvm_disable_sta_queues(mvm, vif, sta);
1928 
1929     /* If there is a TXQ still marked as reserved - free it */
1930     if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1931         u8 reserved_txq = mvm_sta->reserved_queue;
1932         enum iwl_mvm_queue_status *status;
1933 
1934         /*
1935          * If no traffic has gone through the reserved TXQ - it
1936          * is still marked as IWL_MVM_QUEUE_RESERVED, and
1937          * should be manually marked as free again
1938          */
1939         status = &mvm->queue_info[reserved_txq].status;
1940         if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1941              (*status != IWL_MVM_QUEUE_FREE),
1942              "sta_id %d reserved txq %d status %d",
1943              sta_id, reserved_txq, *status))
1944             return -EINVAL;
1945 
1946         *status = IWL_MVM_QUEUE_FREE;
1947     }
1948 
1949     if (vif->type == NL80211_IFTYPE_STATION &&
1950         mvmvif->ap_sta_id == sta_id) {
1951         /* if associated - we can't remove the AP STA now */
1952         if (vif->cfg.assoc)
1953             return ret;
1954 
1955         /* unassoc - go ahead - remove the AP STA now */
1956         mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1957     }
1958 
1959     /*
1960      * This shouldn't happen - the TDLS channel switch should be canceled
1961      * before the STA is removed.
1962      */
1963     if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1964         mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1965         cancel_delayed_work(&mvm->tdls_cs.dwork);
1966     }
1967 
1968     /*
1969      * Make sure that the tx response code sees the station as -EBUSY and
1970      * calls the drain worker.
1971      */
1972     spin_lock_bh(&mvm_sta->lock);
1973     spin_unlock_bh(&mvm_sta->lock);
1974 
1975     ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1976     RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1977 
1978     return ret;
1979 }
1980 
1981 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1982               struct ieee80211_vif *vif,
1983               u8 sta_id)
1984 {
1985     int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1986 
1987     lockdep_assert_held(&mvm->mutex);
1988 
1989     RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1990     return ret;
1991 }
1992 
1993 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1994                  struct iwl_mvm_int_sta *sta,
1995                  u32 qmask, enum nl80211_iftype iftype,
1996                  enum iwl_sta_type type)
1997 {
1998     if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1999         sta->sta_id == IWL_MVM_INVALID_STA) {
2000         sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2001         if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2002             return -ENOSPC;
2003     }
2004 
2005     sta->tfd_queue_msk = qmask;
2006     sta->type = type;
2007 
2008     /* put a non-NULL value so iterating over the stations won't stop */
2009     rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2010     return 0;
2011 }
2012 
2013 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2014 {
2015     RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2016     memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2017     sta->sta_id = IWL_MVM_INVALID_STA;
2018 }
2019 
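     /*
      * Enable the shared aux/sniffer queue on the old TX path, where the
      * queue number is fixed and known in advance.
      */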
2020 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2021                       u8 sta_id, u8 fifo)
2022 {
2023     unsigned int wdg_timeout =
2024         mvm->trans->trans_cfg->base_params->wd_timeout;
2025     struct iwl_trans_txq_scd_cfg cfg = {
2026         .fifo = fifo,
2027         .sta_id = sta_id,
2028         .tid = IWL_MAX_TID_COUNT,
2029         .aggregate = false,
2030         .frame_limit = IWL_FRAME_LIMIT,
2031     };
2032 
2033     WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2034 
2035     iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2036 }
2037 
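     /*
      * On the new TX path the firmware allocates the queue itself, so
      * just request one for this station.
      */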
2038 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2039 {
2040     unsigned int wdg_timeout =
2041         mvm->trans->trans_cfg->base_params->wd_timeout;
2042 
2043     WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2044 
2045     return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2046                        wdg_timeout);
2047 }
2048 
2049 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2050                       int maccolor, u8 *addr,
2051                       struct iwl_mvm_int_sta *sta,
2052                       u16 *queue, int fifo)
2053 {
2054     int ret;
2055 
2056     /* Map queue to fifo - needs to happen before adding station */
2057     if (!iwl_mvm_has_new_tx_api(mvm))
2058         iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2059 
2060     ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2061     if (ret) {
2062         if (!iwl_mvm_has_new_tx_api(mvm))
2063             iwl_mvm_disable_txq(mvm, NULL, queue,
2064                         IWL_MAX_TID_COUNT);
2065         return ret;
2066     }
2067 
2068     /*
2069      * For 22000 firmware and onward we cannot add a queue to a station
2070      * unknown to the firmware, so enable the queue here - after the station was added.
2071      */
2072     if (iwl_mvm_has_new_tx_api(mvm)) {
2073         int txq;
2074 
2075         txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2076         if (txq < 0) {
2077             iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2078             return txq;
2079         }
2080 
2081         *queue = txq;
2082     }
2083 
2084     return 0;
2085 }
2086 
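     /*
      * Add the auxiliary station the driver uses for internal activity
      * (e.g. scanning); on CDB NICs, lmac_id selects the LMAC serving it.
      */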
2087 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2088 {
2089     int ret;
2090 
2091     lockdep_assert_held(&mvm->mutex);
2092 
2093     /* Allocate the aux station and assign the aux queue to it */
2094     ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2095                        NL80211_IFTYPE_UNSPECIFIED,
2096                        IWL_STA_AUX_ACTIVITY);
2097     if (ret)
2098         return ret;
2099 
2100     /*
2101      * In CDB NICs we need to specify which lmac to use for aux activity;
2102      * reuse the mac_id argument to pass lmac_id to the function.
2103      */
2104     ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2105                          &mvm->aux_sta, &mvm->aux_queue,
2106                          IWL_MVM_TX_FIFO_MCAST);
2107     if (ret) {
2108         iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2109         return ret;
2110     }
2111 
2112     return 0;
2113 }
2114 
2115 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2116 {
2117     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2118 
2119     lockdep_assert_held(&mvm->mutex);
2120 
2121     return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2122                           NULL, &mvm->snif_sta,
2123                           &mvm->snif_queue,
2124                           IWL_MVM_TX_FIFO_BE);
2125 }
2126 
2127 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2128 {
2129     int ret;
2130 
2131     lockdep_assert_held(&mvm->mutex);
2132 
2133     if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2134         return -EINVAL;
2135 
2136     iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
2137     ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2138     if (ret)
2139         IWL_WARN(mvm, "Failed sending remove station\n");
2140 
2141     return ret;
2142 }
2143 
2144 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2145 {
2146     int ret;
2147 
2148     lockdep_assert_held(&mvm->mutex);
2149 
2150     if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2151         return -EINVAL;
2152 
2153     iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
2154     ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2155     if (ret)
2156         IWL_WARN(mvm, "Failed sending remove station\n");
2157     iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2158 
2159     return ret;
2160 }
2161 
2162 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2163 {
2164     iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2165 }
2166 
2167 /*
2168  * Send the add station command for the vif's broadcast station.
2169  * Assumes that the station was already allocated.
2170  *
2171  * @mvm: the mvm component
2172  * @vif: the interface to which the broadcast station is added; the
2173  *  station itself is taken from the vif (mvmvif->bcast_sta)
2174  */
2175 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2176 {
2177     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2178     struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2179     static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2180     const u8 *baddr = _baddr;
2181     int queue;
2182     int ret;
2183     unsigned int wdg_timeout =
2184         iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2185     struct iwl_trans_txq_scd_cfg cfg = {
2186         .fifo = IWL_MVM_TX_FIFO_VO,
2187         .sta_id = mvmvif->bcast_sta.sta_id,
2188         .tid = IWL_MAX_TID_COUNT,
2189         .aggregate = false,
2190         .frame_limit = IWL_FRAME_LIMIT,
2191     };
2192 
2193     lockdep_assert_held(&mvm->mutex);
2194 
2195     if (!iwl_mvm_has_new_tx_api(mvm)) {
2196         if (vif->type == NL80211_IFTYPE_AP ||
2197             vif->type == NL80211_IFTYPE_ADHOC) {
2198             queue = mvm->probe_queue;
2199         } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2200             queue = mvm->p2p_dev_queue;
2201         } else {
2202             WARN(1, "Missing required TXQ for adding bcast STA\n");
2203             return -EINVAL;
2204         }
2205 
2206         bsta->tfd_queue_msk |= BIT(queue);
2207 
2208         iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2209     }
2210 
2211     if (vif->type == NL80211_IFTYPE_ADHOC)
2212         baddr = vif->bss_conf.bssid;
2213 
2214     if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2215         return -ENOSPC;
2216 
2217     ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2218                      mvmvif->id, mvmvif->color);
2219     if (ret)
2220         return ret;
2221 
2222     /*
2223      * For 22000 firmware and onward we cannot add a queue to a station
2224      * unknown to the firmware, so enable the queue here - after the station was added.
2225      */
2226     if (iwl_mvm_has_new_tx_api(mvm)) {
2227         queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2228                         IWL_MAX_TID_COUNT,
2229                         wdg_timeout);
2230         if (queue < 0) {
2231             iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2232             return queue;
2233         }
2234 
2235         if (vif->type == NL80211_IFTYPE_AP ||
2236             vif->type == NL80211_IFTYPE_ADHOC)
2237             mvm->probe_queue = queue;
2238         else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2239             mvm->p2p_dev_queue = queue;
2240     }
2241 
2242     return 0;
2243 }
2244 
2245 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2246                       struct ieee80211_vif *vif)
2247 {
2248     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2249     u16 *queueptr, queue;
2250 
2251     lockdep_assert_held(&mvm->mutex);
2252 
2253     iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2254 
2255     switch (vif->type) {
2256     case NL80211_IFTYPE_AP:
2257     case NL80211_IFTYPE_ADHOC:
2258         queueptr = &mvm->probe_queue;
2259         break;
2260     case NL80211_IFTYPE_P2P_DEVICE:
2261         queueptr = &mvm->p2p_dev_queue;
2262         break;
2263     default:
2264         WARN(1, "Can't free bcast queue on vif type %d\n",
2265              vif->type);
2266         return;
2267     }
2268 
2269     queue = *queueptr;
2270     iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
2271     if (iwl_mvm_has_new_tx_api(mvm))
2272         return;
2273 
2274     WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2275     mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2276 }
2277 
2278 /* Send the FW a request to remove the station from its internal data
2279  * structures, but DO NOT remove the entry from the local data structures. */
2280 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2281 {
2282     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2283     int ret;
2284 
2285     lockdep_assert_held(&mvm->mutex);
2286 
2287     iwl_mvm_free_bcast_sta_queues(mvm, vif);
2288 
2289     ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2290     if (ret)
2291         IWL_WARN(mvm, "Failed sending remove station\n");
2292     return ret;
2293 }
2294 
2295 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2296 {
2297     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2298 
2299     lockdep_assert_held(&mvm->mutex);
2300 
2301     return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2302                     ieee80211_vif_type_p2p(vif),
2303                     IWL_STA_GENERAL_PURPOSE);
2304 }
2305 
2306 /* Allocate a new station entry for the broadcast station to the given vif,
2307  * and send it to the FW.
2308  * Note that each P2P mac should have its own broadcast station.
2309  *
2310  * @mvm: the mvm component
2311  * @vif: the interface to which the broadcast station is added; the
2312  *  station itself is taken from the vif (mvmvif->bcast_sta). */
2313 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2314 {
2315     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2316     struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2317     int ret;
2318 
2319     lockdep_assert_held(&mvm->mutex);
2320 
2321     ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2322     if (ret)
2323         return ret;
2324 
2325     ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2326 
2327     if (ret)
2328         iwl_mvm_dealloc_int_sta(mvm, bsta);
2329 
2330     return ret;
2331 }
2332 
2333 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2334 {
2335     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2336 
2337     iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2338 }
2339 
2340 /*
2341  * Send the FW a request to remove the station from its internal data
2342  * structures, and in addition remove it from the local data structure.
2343  */
2344 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2345 {
2346     int ret;
2347 
2348     lockdep_assert_held(&mvm->mutex);
2349 
2350     ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2351 
2352     iwl_mvm_dealloc_bcast_sta(mvm, vif);
2353 
2354     return ret;
2355 }
2356 
2357 /*
2358  * Allocate a new station entry for the multicast station to the given vif,
2359  * and send it to the FW.
2360  * Note that each AP/GO mac should have its own multicast station.
2361  *
2362  * @mvm: the mvm component
2363  * @vif: the interface to which the multicast station is added
2364  */
2365 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2366 {
2367     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2368     struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2369     static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2370     const u8 *maddr = _maddr;
2371     struct iwl_trans_txq_scd_cfg cfg = {
2372         .fifo = vif->type == NL80211_IFTYPE_AP ?
2373             IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2374         .sta_id = msta->sta_id,
2375         .tid = 0,
2376         .aggregate = false,
2377         .frame_limit = IWL_FRAME_LIMIT,
2378     };
2379     unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2380     int ret;
2381 
2382     lockdep_assert_held(&mvm->mutex);
2383 
2384     if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2385             vif->type != NL80211_IFTYPE_ADHOC))
2386         return -ENOTSUPP;
2387 
2388     /*
2389      * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2390      * invalid, so make sure we use the queue we want.
2391      * Note that this is done here as we want to avoid making DQA
2392      * changes in the mac80211 layer.
2393      */
2394     if (vif->type == NL80211_IFTYPE_ADHOC)
2395         mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2396 
2397     /*
2398      * While in previous FWs we had to exclude the cab queue from the TFD
2399      * queue mask, now it is needed like any other queue.
2400      */
2401     if (!iwl_mvm_has_new_tx_api(mvm) &&
2402         fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2403         iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2404                    timeout);
2405         msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2406     }
2407     ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2408                      mvmvif->id, mvmvif->color);
2409     if (ret)
2410         goto err;
2411 
2412     /*
2413      * Enable the cab queue after the ADD_STA command is sent.
2414      * This is needed for 22000 firmware, which won't accept an
2415      * SCD_QUEUE_CFG command with an unknown station id, and for FW
2416      * that doesn't support the station API, since the cab queue is
2417      * not included in the tfd_queue_mask.
2418      */
2419     if (iwl_mvm_has_new_tx_api(mvm)) {
2420         int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2421                             0,
2422                             timeout);
2423         if (queue < 0) {
2424             ret = queue;
2425             goto err;
2426         }
2427         mvmvif->cab_queue = queue;
2428     } else if (!fw_has_api(&mvm->fw->ucode_capa,
2429                    IWL_UCODE_TLV_API_STA_TYPE))
2430         iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2431                    timeout);
2432 
2433     return 0;
2434 err:
2435     iwl_mvm_dealloc_int_sta(mvm, msta);
2436     return ret;
2437 }
2438 
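     /*
      * Remove a key from the firmware's key table for the given station.
      * The union below covers both the old (v1) and the newer, larger
      * ADD_STA_KEY command layouts; only the common prefix is filled in.
      */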
2439 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2440                     struct ieee80211_key_conf *keyconf,
2441                     bool mcast)
2442 {
2443     union {
2444         struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2445         struct iwl_mvm_add_sta_key_cmd cmd;
2446     } u = {};
2447     bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2448                   IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2449     __le16 key_flags;
2450     int ret, size;
2451     u32 status;
2452 
2453     /* This is a valid situation for GTK removal */
2454     if (sta_id == IWL_MVM_INVALID_STA)
2455         return 0;
2456 
2457     key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2458                  STA_KEY_FLG_KEYID_MSK);
2459     key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2460     key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2461 
2462     if (mcast)
2463         key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2464 
2465     /*
2466      * The fields assigned here are in the same location at the start
2467      * of the command, so we can do this union trick.
2468      */
2469     u.cmd.common.key_flags = key_flags;
2470     u.cmd.common.key_offset = keyconf->hw_key_idx;
2471     u.cmd.common.sta_id = sta_id;
2472 
2473     size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2474 
2475     status = ADD_STA_SUCCESS;
2476     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2477                       &status);
2478 
2479     switch (status) {
2480     case ADD_STA_SUCCESS:
2481         IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2482         break;
2483     default:
2484         ret = -EIO;
2485         IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2486         break;
2487     }
2488 
2489     return ret;
2490 }
2491 
2492 /*
2493  * Send the FW a request to remove the station from its internal data
2494  * structures, and in addition remove it from the local data structure.
2495  */
2496 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2497 {
2498     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2499     int ret;
2500 
2501     lockdep_assert_held(&mvm->mutex);
2502 
2503     iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2504 
2505     iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
2506 
2507     ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2508     if (ret)
2509         IWL_WARN(mvm, "Failed sending remove station\n");
2510 
2511     return ret;
2512 }
2513 
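     /* Synchronously notify all RX queues that this BAID is being removed */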
2514 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2515 {
2516     struct iwl_mvm_delba_data notif = {
2517         .baid = baid,
2518     };
2519 
2520     iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2521                     &notif, sizeof(notif));
2522 }
2523 
2524 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2525                  struct iwl_mvm_baid_data *data)
2526 {
2527     int i;
2528 
2529     iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2530 
2531     for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2532         int j;
2533         struct iwl_mvm_reorder_buffer *reorder_buf =
2534             &data->reorder_buf[i];
2535         struct iwl_mvm_reorder_buf_entry *entries =
2536             &data->entries[i * data->entries_per_queue];
2537 
2538         spin_lock_bh(&reorder_buf->lock);
2539         if (likely(!reorder_buf->num_stored)) {
2540             spin_unlock_bh(&reorder_buf->lock);
2541             continue;
2542         }
2543 
2544         /*
2545          * This shouldn't happen in regular DELBA since the internal
2546          * delBA notification should trigger a release of all frames in
2547          * the reorder buffer.
2548          */
2549         WARN_ON(1);
2550 
2551         for (j = 0; j < reorder_buf->buf_size; j++)
2552             __skb_queue_purge(&entries[j].e.frames);
2553         /*
2554          * Prevent timer re-arm. This covers a very far-fetched case
2555          * where we timed out on the notification: RX frames that were
2556          * pending in the RX queue before the notification might get
2557          * processed between now and the actual deletion, and we would
2558          * re-arm the timer although we are deleting the reorder
2559          * buffer.
2560          */
2561         reorder_buf->removed = true;
2562         spin_unlock_bh(&reorder_buf->lock);
2563         del_timer_sync(&reorder_buf->reorder_timer);
2564     }
2565 }
2566 
2567 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2568                     struct iwl_mvm_baid_data *data,
2569                     u16 ssn, u16 buf_size)
2570 {
2571     int i;
2572 
2573     for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2574         struct iwl_mvm_reorder_buffer *reorder_buf =
2575             &data->reorder_buf[i];
2576         struct iwl_mvm_reorder_buf_entry *entries =
2577             &data->entries[i * data->entries_per_queue];
2578         int j;
2579 
2580         reorder_buf->num_stored = 0;
2581         reorder_buf->head_sn = ssn;
2582         reorder_buf->buf_size = buf_size;
2583         /* rx reorder timer */
2584         timer_setup(&reorder_buf->reorder_timer,
2585                 iwl_mvm_reorder_timer_expired, 0);
2586         spin_lock_init(&reorder_buf->lock);
2587         reorder_buf->mvm = mvm;
2588         reorder_buf->queue = i;
2589         reorder_buf->valid = false;
2590         for (j = 0; j < reorder_buf->buf_size; j++)
2591             __skb_queue_head_init(&entries[j].e.frames);
2592     }
2593 }
2594 
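     /*
      * Start or stop an RX BA session using the legacy ADD_STA flow; on a
      * successful start, the firmware returns the allocated BAID in the
      * status word.
      */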
2595 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2596                   struct iwl_mvm_sta *mvm_sta,
2597                   bool start, int tid, u16 ssn,
2598                   u16 buf_size)
2599 {
2600     struct iwl_mvm_add_sta_cmd cmd = {
2601         .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2602         .sta_id = mvm_sta->sta_id,
2603         .add_modify = STA_MODE_MODIFY,
2604     };
2605     u32 status;
2606     int ret;
2607 
2608     if (start) {
2609         cmd.add_immediate_ba_tid = tid;
2610         cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2611         cmd.rx_ba_window = cpu_to_le16(buf_size);
2612         cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2613     } else {
2614         cmd.remove_immediate_ba_tid = tid;
2615         cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2616     }
2617 
2618     status = ADD_STA_SUCCESS;
2619     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2620                       iwl_mvm_add_sta_cmd_size(mvm),
2621                       &cmd, &status);
2622     if (ret)
2623         return ret;
2624 
2625     switch (status & IWL_ADD_STA_STATUS_MASK) {
2626     case ADD_STA_SUCCESS:
2627         IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2628                  start ? "start" : "stopp");
2629         if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2630                 !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2631             return -EINVAL;
2632         return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2633     case ADD_STA_IMMEDIATE_BA_FAILURE:
2634         IWL_WARN(mvm, "RX BA Session refused by fw\n");
2635         return -ENOSPC;
2636     default:
2637         IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2638             start ? "start" : "stopp", status);
2639         return -EIO;
2640     }
2641 }
2642 
2643 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2644                   struct iwl_mvm_sta *mvm_sta,
2645                   bool start, int tid, u16 ssn,
2646                   u16 buf_size, int baid)
2647 {
2648     struct iwl_rx_baid_cfg_cmd cmd = {
2649         .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2650                   cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2651     };
2652     u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2653     int ret;
2654 
2655     BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2656 
2657     if (start) {
2658         cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2659         cmd.alloc.tid = tid;
2660         cmd.alloc.ssn = cpu_to_le16(ssn);
2661         cmd.alloc.win_size = cpu_to_le16(buf_size);
2662         baid = -EIO;
2663     } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2664         cmd.remove_v1.baid = cpu_to_le32(baid);
2665         BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2666     } else {
2667         cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2668         cmd.remove.tid = cpu_to_le32(tid);
2669     }
2670 
2671     ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2672                       &cmd, &baid);
2673     if (ret)
2674         return ret;
2675 
2676     if (!start) {
2677         /* ignore firmware baid on remove */
2678         baid = 0;
2679     }
2680 
2681     IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2682              start ? "start" : "stopp");
2683 
2684     if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2685         return -EINVAL;
2686 
2687     return baid;
2688 }
2689 
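     /*
      * Use the new BAID allocation command when the firmware supports it,
      * otherwise fall back to the legacy ADD_STA flow.
      */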
2690 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2691                   bool start, int tid, u16 ssn, u16 buf_size,
2692                   int baid)
2693 {
2694     if (fw_has_capa(&mvm->fw->ucode_capa,
2695             IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2696         return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
2697                           tid, ssn, buf_size, baid);
2698 
2699     return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
2700                       tid, ssn, buf_size);
2701 }
2702 
2703 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2704                int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2705 {
2706     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2707     struct iwl_mvm_baid_data *baid_data = NULL;
2708     int ret, baid;
2709     u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2710                                    IWL_MAX_BAID_OLD;
2711 
2712     lockdep_assert_held(&mvm->mutex);
2713 
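         /* Refuse to start a new session once the RX BA session limit is reached */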
2714     if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2715         IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2716         return -ENOSPC;
2717     }
2718 
2719     if (iwl_mvm_has_new_rx_api(mvm) && start) {
2720         u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2721 
2722         /* sparse doesn't like the __align() so don't check */
2723 #ifndef __CHECKER__
2724         /*
2725          * The division below will be OK if either the cache line size
2726          * can be divided by the entry size (ALIGN will round up) or
2727          * if the entry size can be divided by the cache line size, in
2728          * which case the ALIGN() will do nothing.
2729          */
2730         BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2731                  sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2732 #endif
2733 
2734         /*
2735          * Upward align the reorder buffer size to fill an entire cache
2736          * line for each queue, to avoid sharing cache lines between
2737          * different queues.
2738          */
2739         reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2740 
2741         /*
2742          * Allocate here so if allocation fails we can bail out early
2743          * before starting the BA session in the firmware
2744          */
2745         baid_data = kzalloc(sizeof(*baid_data) +
2746                     mvm->trans->num_rx_queues *
2747                     reorder_buf_size,
2748                     GFP_KERNEL);
2749         if (!baid_data)
2750             return -ENOMEM;
2751 
2752         /*
2753          * This division is why we need the above BUILD_BUG_ON(),
2754          * if that doesn't hold then this will not be right.
2755          */
2756         baid_data->entries_per_queue =
2757             reorder_buf_size / sizeof(baid_data->entries[0]);
2758     }
2759 
2760     if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2761         baid = mvm_sta->tid_to_baid[tid];
2762     } else {
2763         /* we don't really need it in this case */
2764         baid = -1;
2765     }
2766 
2767     /* Don't send command to remove (start=0) BAID during restart */
2768     if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2769         baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
2770                       baid);
2771 
2772     if (baid < 0) {
2773         ret = baid;
2774         goto out_free;
2775     }
2776 
2777     if (start) {
2778         mvm->rx_ba_sessions++;
2779 
2780         if (!iwl_mvm_has_new_rx_api(mvm))
2781             return 0;
2782 
2783         baid_data->baid = baid;
2784         baid_data->timeout = timeout;
2785         baid_data->last_rx = jiffies;
2786         baid_data->rcu_ptr = &mvm->baid_map[baid];
2787         timer_setup(&baid_data->session_timer,
2788                 iwl_mvm_rx_agg_session_expired, 0);
2789         baid_data->mvm = mvm;
2790         baid_data->tid = tid;
2791         baid_data->sta_id = mvm_sta->sta_id;
2792 
2793         mvm_sta->tid_to_baid[tid] = baid;
2794         if (timeout)
2795             mod_timer(&baid_data->session_timer,
2796                   TU_TO_EXP_TIME(timeout * 2));
2797 
2798         iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2799         /*
2800          * Protect the BA data with RCU to cover the case where our
2801          * internal RX sync mechanism times out (not that it's supposed
2802          * to happen) and we free the session data while RX is still
2803          * being processed in parallel.
2804          */
2805         IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2806                  mvm_sta->sta_id, tid, baid);
2807         WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2808         rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2809     } else  {
2810         baid = mvm_sta->tid_to_baid[tid];
2811 
2812         if (mvm->rx_ba_sessions > 0)
2813             /* check that restart flow didn't zero the counter */
2814             mvm->rx_ba_sessions--;
2815         if (!iwl_mvm_has_new_rx_api(mvm))
2816             return 0;
2817 
2818         if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2819             return -EINVAL;
2820 
2821         baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2822         if (WARN_ON(!baid_data))
2823             return -EINVAL;
2824 
2825         /* synchronize all rx queues so we can safely delete */
2826         iwl_mvm_free_reorder(mvm, baid_data);
2827         del_timer_sync(&baid_data->session_timer);
2828         RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2829         kfree_rcu(baid_data, rcu_head);
2830         IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2831 
2832         /*
2833          * After we've deleted it, do another queue sync
2834          * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2835          * running it won't find a new session in the old
2836          * BAID. It can find the NULL pointer for the BAID,
2837          * but we must not have it find a different session.
2838          */
2839         iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2840                         true, NULL, 0);
2841     }
2842     return 0;
2843 
2844 out_free:
2845     kfree(baid_data);
2846     return ret;
2847 }
2848 
2849 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2850                int tid, u8 queue, bool start)
2851 {
2852     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2853     struct iwl_mvm_add_sta_cmd cmd = {};
2854     int ret;
2855     u32 status;
2856 
2857     lockdep_assert_held(&mvm->mutex);
2858 
2859     if (start) {
2860         mvm_sta->tfd_queue_msk |= BIT(queue);
2861         mvm_sta->tid_disable_agg &= ~BIT(tid);
2862     } else {
2863         /* In DQA-mode the queue isn't removed on agg termination */
2864         mvm_sta->tid_disable_agg |= BIT(tid);
2865     }
2866 
2867     cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2868     cmd.sta_id = mvm_sta->sta_id;
2869     cmd.add_modify = STA_MODE_MODIFY;
2870     if (!iwl_mvm_has_new_tx_api(mvm))
2871         cmd.modify_mask = STA_MODIFY_QUEUES;
2872     cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2873     cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2874     cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2875 
2876     status = ADD_STA_SUCCESS;
2877     ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2878                       iwl_mvm_add_sta_cmd_size(mvm),
2879                       &cmd, &status);
2880     if (ret)
2881         return ret;
2882 
2883     switch (status & IWL_ADD_STA_STATUS_MASK) {
2884     case ADD_STA_SUCCESS:
2885         break;
2886     default:
2887         ret = -EIO;
2888         IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2889             start ? "start" : "stopp", status);
2890         break;
2891     }
2892 
2893     return ret;
2894 }
2895 
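     /* Fixed TID to mac80211 AC mapping, for TIDs 0-7 plus MGMT as TID 8 */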
2896 const u8 tid_to_mac80211_ac[] = {
2897     IEEE80211_AC_BE,
2898     IEEE80211_AC_BK,
2899     IEEE80211_AC_BK,
2900     IEEE80211_AC_BE,
2901     IEEE80211_AC_VI,
2902     IEEE80211_AC_VI,
2903     IEEE80211_AC_VO,
2904     IEEE80211_AC_VO,
2905     IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2906 };
2907 
2908 static const u8 tid_to_ucode_ac[] = {
2909     AC_BE,
2910     AC_BK,
2911     AC_BK,
2912     AC_BE,
2913     AC_VI,
2914     AC_VI,
2915     AC_VO,
2916     AC_VO,
2917 };
2918 
2919 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2920                  struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2921 {
2922     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2923     struct iwl_mvm_tid_data *tid_data;
2924     u16 normalized_ssn;
2925     u16 txq_id;
2926     int ret;
2927 
2928     if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2929         return -EINVAL;
2930 
2931     if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2932         mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2933         IWL_ERR(mvm,
2934             "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2935             mvmsta->tid_data[tid].state);
2936         return -ENXIO;
2937     }
2938 
2939     lockdep_assert_held(&mvm->mutex);
2940 
2941     if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2942         iwl_mvm_has_new_tx_api(mvm)) {
2943         u8 ac = tid_to_mac80211_ac[tid];
2944 
2945         ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2946         if (ret)
2947             return ret;
2948     }
2949 
2950     spin_lock_bh(&mvmsta->lock);
2951 
2952     /*
2953      * Note the possible cases:
2954      *  1. An enabled TXQ - TXQ needs to become agg'ed
2955      *  2. The TXQ hasn't yet been enabled, so find a free one and mark
2956      *  it as reserved
2957      */
2958     txq_id = mvmsta->tid_data[tid].txq_id;
2959     if (txq_id == IWL_MVM_INVALID_QUEUE) {
2960         ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2961                           IWL_MVM_DQA_MIN_DATA_QUEUE,
2962                           IWL_MVM_DQA_MAX_DATA_QUEUE);
2963         if (ret < 0) {
2964             IWL_ERR(mvm, "Failed to allocate agg queue\n");
2965             goto out;
2966         }
2967 
2968         txq_id = ret;
2969 
2970         /* TXQ hasn't yet been enabled, so mark it only as reserved */
2971         mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2972     } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2973         ret = -ENXIO;
2974         IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
2975             txq_id, IWL_MAX_HW_QUEUES - 1);
2976         goto out;
2977 
2978     } else if (unlikely(mvm->queue_info[txq_id].status ==
2979                 IWL_MVM_QUEUE_SHARED)) {
2980         ret = -ENXIO;
2981         IWL_DEBUG_TX_QUEUES(mvm,
2982                     "Can't start tid %d agg on shared queue!\n",
2983                     tid);
2984         goto out;
2985     }
2986 
2987     IWL_DEBUG_TX_QUEUES(mvm,
2988                 "AGG for tid %d will be on queue #%d\n",
2989                 tid, txq_id);
2990 
2991     tid_data = &mvmsta->tid_data[tid];
2992     tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2993     tid_data->txq_id = txq_id;
2994     *ssn = tid_data->ssn;
2995 
2996     IWL_DEBUG_TX_QUEUES(mvm,
2997                 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2998                 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2999                 tid_data->next_reclaimed);
3000 
3001     /*
3002      * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
3003      * need to align the wrap-around of the ssn to compare relevant values.
3004      */
3005     normalized_ssn = tid_data->ssn;
3006     if (mvm->trans->trans_cfg->gen2)
3007         normalized_ssn &= 0xff;
3008 
3009     if (normalized_ssn == tid_data->next_reclaimed) {
3010         tid_data->state = IWL_AGG_STARTING;
3011         ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3012     } else {
3013         tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3014         ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3015     }
3016 
3017 out:
3018     spin_unlock_bh(&mvmsta->lock);
3019 
3020     return ret;
3021 }
3022 
3023 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3024                 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3025                 bool amsdu)
3026 {
3027     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3028     struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3029     unsigned int wdg_timeout =
3030         iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3031     int queue, ret;
3032     bool alloc_queue = true;
3033     enum iwl_mvm_queue_status queue_status;
3034     u16 ssn;
3035 
3036     struct iwl_trans_txq_scd_cfg cfg = {
3037         .sta_id = mvmsta->sta_id,
3038         .tid = tid,
3039         .frame_limit = buf_size,
3040         .aggregate = true,
3041     };
3042 
3043     /*
3044      * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3045      * manager, so this function should never be called in this case.
3046      */
3047     if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3048         return -EINVAL;
3049 
3050     BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3051              != IWL_MAX_TID_COUNT);
3052 
3053     spin_lock_bh(&mvmsta->lock);
3054     ssn = tid_data->ssn;
3055     queue = tid_data->txq_id;
3056     tid_data->state = IWL_AGG_ON;
3057     mvmsta->agg_tids |= BIT(tid);
3058     tid_data->ssn = 0xffff;
3059     tid_data->amsdu_in_ampdu_allowed = amsdu;
3060     spin_unlock_bh(&mvmsta->lock);
3061 
3062     if (iwl_mvm_has_new_tx_api(mvm)) {
3063         /*
3064          * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3065          * would have failed, so if we are here there is no need to
3066          * allocate a queue.
3067          * However, if the aggregation size differs from the default
3068          * size, the scheduler should be reconfigured.
3069          * We cannot do this with the new TX API, so return unsupported
3070          * for now, until it is offloaded to firmware.
3071          * Note that if the SCD default value changes, this condition
3072          * should be updated as well.
3073          */
3074         if (buf_size < IWL_FRAME_LIMIT)
3075             return -ENOTSUPP;
3076 
3077         ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3078         if (ret)
3079             return -EIO;
3080         goto out;
3081     }
3082 
3083     cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3084 
3085     queue_status = mvm->queue_info[queue].status;
3086 
3087     /* Maybe there is no need to even alloc a queue... */
3088     if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3089         alloc_queue = false;
3090 
3091     /*
3092      * Only reconfigure the SCD for the queue if the window size has
3093      * changed from the current one (i.e. become smaller).
3094      */
3095     if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3096         /*
3097          * If reconfiguring an existing queue, it must first be
3098          * drained.
3099          */
3100         ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3101                              BIT(queue));
3102         if (ret) {
3103             IWL_ERR(mvm,
3104                 "Error draining queue before reconfig\n");
3105             return ret;
3106         }
3107 
3108         ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3109                        mvmsta->sta_id, tid,
3110                        buf_size, ssn);
3111         if (ret) {
3112             IWL_ERR(mvm,
3113                 "Error reconfiguring TXQ #%d\n", queue);
3114             return ret;
3115         }
3116     }
3117 
3118     if (alloc_queue)
3119         iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3120                    &cfg, wdg_timeout);
3121 
3122     /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3123     if (queue_status != IWL_MVM_QUEUE_SHARED) {
3124         ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3125         if (ret)
3126             return -EIO;
3127     }
3128 
3129     /* The queue is in use now; no need to keep it marked as reserved */
3130     mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3131 
3132 out:
3133     /*
3134      * Even though in theory the peer could have different
3135      * aggregation reorder buffer sizes for different sessions,
3136      * our ucode doesn't allow for that and has a global limit
3137      * for each station. Therefore, use the minimum of all the
3138      * aggregation sessions and our default value.
3139      */
3140     mvmsta->max_agg_bufsize =
3141         min(mvmsta->max_agg_bufsize, buf_size);
3142     mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3143 
3144     IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3145              sta->addr, tid);
3146 
3147     return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3148 }
3149 
3150 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3151                     struct iwl_mvm_sta *mvmsta,
3152                     struct iwl_mvm_tid_data *tid_data)
3153 {
3154     u16 txq_id = tid_data->txq_id;
3155 
3156     lockdep_assert_held(&mvm->mutex);
3157 
3158     if (iwl_mvm_has_new_tx_api(mvm))
3159         return;
3160 
3161     /*
3162      * The TXQ is marked as reserved only if no traffic came through yet.
3163      * This means no traffic has been sent on this TID (agg'd or not), so
3164      * we no longer have use for the queue. It hasn't even been
3165      * allocated through iwl_mvm_enable_txq, so we can just mark it back
3166      * as free.
3167      */
3168     if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3169         mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3170         tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3171     }
3172 }
3173 
3174 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3175                 struct ieee80211_sta *sta, u16 tid)
3176 {
3177     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3178     struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3179     u16 txq_id;
3180     int err;
3181 
3182     /*
3183      * If mac80211 is cleaning its state, then say that we finished since
3184      * our state has been cleared anyway.
3185      */
3186     if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3187         ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3188         return 0;
3189     }
3190 
3191     spin_lock_bh(&mvmsta->lock);
3192 
3193     txq_id = tid_data->txq_id;
3194 
3195     IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3196                 mvmsta->sta_id, tid, txq_id, tid_data->state);
3197 
3198     mvmsta->agg_tids &= ~BIT(tid);
3199 
3200     iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3201 
3202     switch (tid_data->state) {
3203     case IWL_AGG_ON:
3204         tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3205 
3206         IWL_DEBUG_TX_QUEUES(mvm,
3207                     "ssn = %d, next_recl = %d\n",
3208                     tid_data->ssn, tid_data->next_reclaimed);
3209 
3210         tid_data->ssn = 0xffff;
3211         tid_data->state = IWL_AGG_OFF;
3212         spin_unlock_bh(&mvmsta->lock);
3213 
3214         ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3215 
3216         iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3217         return 0;
3218     case IWL_AGG_STARTING:
3219     case IWL_EMPTYING_HW_QUEUE_ADDBA:
3220         /*
3221          * The agg session has been stopped before it was set up. This
3222          * can happen, for example, when the AddBA timer times out.
3223          */
3224 
3225         /* No barriers since we are under mutex */
3226         lockdep_assert_held(&mvm->mutex);
3227 
3228         ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3229         tid_data->state = IWL_AGG_OFF;
3230         err = 0;
3231         break;
3232     default:
3233         IWL_ERR(mvm,
3234             "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3235             mvmsta->sta_id, tid, tid_data->state);
3236         IWL_ERR(mvm,
3237             "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3238         err = -EINVAL;
3239     }
3240 
3241     spin_unlock_bh(&mvmsta->lock);
3242 
3243     return err;
3244 }
3245 
3246 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3247                 struct ieee80211_sta *sta, u16 tid)
3248 {
3249     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3250     struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3251     u16 txq_id;
3252     enum iwl_mvm_agg_state old_state;
3253 
3254     /*
3255      * First set the agg state to OFF to avoid calling
3256      * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3257      */
3258     spin_lock_bh(&mvmsta->lock);
3259     txq_id = tid_data->txq_id;
3260     IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3261                 mvmsta->sta_id, tid, txq_id, tid_data->state);
3262     old_state = tid_data->state;
3263     tid_data->state = IWL_AGG_OFF;
3264     mvmsta->agg_tids &= ~BIT(tid);
3265     spin_unlock_bh(&mvmsta->lock);
3266 
3267     iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3268 
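    /*
     * Any state >= IWL_AGG_ON (ON itself or the EMPTYING ones) means
     * frames may have gone through the aggregation queue, so drain the
     * station and flush the queue before disabling aggregation in FW.
     */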
3269     if (old_state >= IWL_AGG_ON) {
3270         iwl_mvm_drain_sta(mvm, mvmsta, true);
3271 
3272         if (iwl_mvm_has_new_tx_api(mvm)) {
3273             if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3274                            BIT(tid)))
3275                 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3276             iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3277         } else {
3278             if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3279                 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3280             iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3281         }
3282 
3283         iwl_mvm_drain_sta(mvm, mvmsta, false);
3284 
3285         iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3286     }
3287 
3288     return 0;
3289 }
3290 
3291 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3292 {
3293     int i, max = -1, max_offs = -1;
3294 
3295     lockdep_assert_held(&mvm->mutex);
3296 
3297     /* Pick the unused key offset with the highest 'deleted'
3298      * counter. Every time a key is deleted, all the counters
3299      * are incremented and the one that was just deleted is
3300      * reset to zero. Thus, the highest counter is the one
3301      * that was deleted longest ago. Pick that one.
3302      */
3303     for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3304         if (test_bit(i, mvm->fw_key_table))
3305             continue;
3306         if (mvm->fw_key_deleted[i] > max) {
3307             max = mvm->fw_key_deleted[i];
3308             max_offs = i;
3309         }
3310     }
3311 
3312     if (max_offs < 0)
3313         return STA_KEY_IDX_INVALID;
3314 
3315     return max_offs;
3316 }
3317 
3318 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3319                            struct ieee80211_vif *vif,
3320                            struct ieee80211_sta *sta)
3321 {
3322     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3323 
3324     if (sta)
3325         return iwl_mvm_sta_from_mac80211(sta);
3326 
3327     /*
3328      * The device expects GTKs for station interfaces to be
3329      * installed as GTKs for the AP station. If we have no
3330      * station ID, then use the AP's station ID.
3331      */
3332     if (vif->type == NL80211_IFTYPE_STATION &&
3333         mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3334         u8 sta_id = mvmvif->ap_sta_id;
3335 
3336         sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3337                         lockdep_is_held(&mvm->mutex));
3338 
3339         /*
3340          * It is possible that the 'sta' parameter is NULL,
3341          * for example when a GTK is removed - the sta_id will then
3342          * be the AP ID, and no station was passed by mac80211.
3343          */
3344         if (IS_ERR_OR_NULL(sta))
3345             return NULL;
3346 
3347         return iwl_mvm_sta_from_mac80211(sta);
3348     }
3349 
3350     return NULL;
3351 }
3352 
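/*
 * Byte-wise comparison of two packet numbers in firmware byte order,
 * where the byte at the highest index is the most significant one.
 * Returns >0 if pn1 > pn2, <0 if pn1 < pn2 and 0 if they are equal.
 */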
3353 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3354 {
3355     int i;
3356 
3357     for (i = len - 1; i >= 0; i--) {
3358         if (pn1[i] > pn2[i])
3359             return 1;
3360         if (pn1[i] < pn2[i])
3361             return -1;
3362     }
3363 
3364     return 0;
3365 }
3366 
3367 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3368                 u32 sta_id,
3369                 struct ieee80211_key_conf *key, bool mcast,
3370                 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3371                 u8 key_offset, bool mfp)
3372 {
3373     union {
3374         struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3375         struct iwl_mvm_add_sta_key_cmd cmd;
3376     } u = {};
3377     __le16 key_flags;
3378     int ret;
3379     u32 status;
3380     u16 keyidx;
3381     u64 pn = 0;
3382     int i, size;
3383     bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3384                   IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3385     int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3386                         new_api ? 2 : 1);
3387 
3388     if (sta_id == IWL_MVM_INVALID_STA)
3389         return -EINVAL;
3390 
3391     keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3392          STA_KEY_FLG_KEYID_MSK;
3393     key_flags = cpu_to_le16(keyidx);
3394     key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3395 
3396     switch (key->cipher) {
3397     case WLAN_CIPHER_SUITE_TKIP:
3398         key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3399         if (api_ver >= 2) {
3400             memcpy((void *)&u.cmd.tx_mic_key,
3401                    &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3402                    IWL_MIC_KEY_SIZE);
3403 
3404             memcpy((void *)&u.cmd.rx_mic_key,
3405                    &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3406                    IWL_MIC_KEY_SIZE);
3407             pn = atomic64_read(&key->tx_pn);
3408 
3409         } else {
3410             u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3411             for (i = 0; i < 5; i++)
3412                 u.cmd_v1.tkip_rx_ttak[i] =
3413                     cpu_to_le16(tkip_p1k[i]);
3414         }
3415         memcpy(u.cmd.common.key, key->key, key->keylen);
3416         break;
3417     case WLAN_CIPHER_SUITE_CCMP:
3418         key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3419         memcpy(u.cmd.common.key, key->key, key->keylen);
3420         if (api_ver >= 2)
3421             pn = atomic64_read(&key->tx_pn);
3422         break;
3423     case WLAN_CIPHER_SUITE_WEP104:
3424         key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3425         fallthrough;
3426     case WLAN_CIPHER_SUITE_WEP40:
3427         key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3428         memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3429         break;
3430     case WLAN_CIPHER_SUITE_GCMP_256:
3431         key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3432         fallthrough;
3433     case WLAN_CIPHER_SUITE_GCMP:
3434         key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3435         memcpy(u.cmd.common.key, key->key, key->keylen);
3436         if (api_ver >= 2)
3437             pn = atomic64_read(&key->tx_pn);
3438         break;
3439     default:
3440         key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3441         memcpy(u.cmd.common.key, key->key, key->keylen);
3442     }
3443 
3444     if (mcast)
3445         key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3446     if (mfp)
3447         key_flags |= cpu_to_le16(STA_KEY_MFP);
3448 
3449     u.cmd.common.key_offset = key_offset;
3450     u.cmd.common.key_flags = key_flags;
3451     u.cmd.common.sta_id = sta_id;
3452 
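    /*
     * TKIP has no management-frame RX counter, so start from TID 0;
     * for other ciphers start from -1 to include the management frame
     * counter too. The command holds a single replay counter per key,
     * so only the largest PN found across the TIDs is kept below.
     */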
3453     if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3454         i = 0;
3455     else
3456         i = -1;
3457 
3458     for (; i < IEEE80211_NUM_TIDS; i++) {
3459         struct ieee80211_key_seq seq = {};
3460         u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3461         int rx_pn_len = 8;
3462         /* there's a hole at 2/3 in FW format depending on version */
3463         int hole = api_ver >= 3 ? 0 : 2;
3464 
3465         ieee80211_get_key_rx_seq(key, i, &seq);
3466 
3467         if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3468             rx_pn[0] = seq.tkip.iv16;
3469             rx_pn[1] = seq.tkip.iv16 >> 8;
3470             rx_pn[2 + hole] = seq.tkip.iv32;
3471             rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3472             rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3473             rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3474         } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3475             rx_pn = seq.hw.seq;
3476             rx_pn_len = seq.hw.seq_len;
3477         } else {
3478             rx_pn[0] = seq.ccmp.pn[0];
3479             rx_pn[1] = seq.ccmp.pn[1];
3480             rx_pn[2 + hole] = seq.ccmp.pn[2];
3481             rx_pn[3 + hole] = seq.ccmp.pn[3];
3482             rx_pn[4 + hole] = seq.ccmp.pn[4];
3483             rx_pn[5 + hole] = seq.ccmp.pn[5];
3484         }
3485 
3486         if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3487                    rx_pn_len) > 0)
3488             memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3489                    rx_pn_len);
3490     }
3491 
3492     if (api_ver >= 2) {
3493         u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3494         size = sizeof(u.cmd);
3495     } else {
3496         size = sizeof(u.cmd_v1);
3497     }
3498 
3499     status = ADD_STA_SUCCESS;
3500     if (cmd_flags & CMD_ASYNC)
3501         ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3502                        &u.cmd);
3503     else
3504         ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3505                           &u.cmd, &status);
3506 
3507     switch (status) {
3508     case ADD_STA_SUCCESS:
3509         IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3510         break;
3511     default:
3512         ret = -EIO;
3513         IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3514         break;
3515     }
3516 
3517     return ret;
3518 }
3519 
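/*
 * Install or remove an IGTK (key index 4/5) or a BIGTK (key index 6/7)
 * for management frame protection via the MGMT_MCAST_KEY command.
 */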
3520 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3521                  struct ieee80211_key_conf *keyconf,
3522                  u8 sta_id, bool remove_key)
3523 {
3524     struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3525 
3526     /* verify the key details match the required command's expectations */
3527     if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3528             (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3529              keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3530             (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3531              keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3532              keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3533         return -EINVAL;
3534 
3535     if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3536             keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3537         return -EINVAL;
3538 
3539     igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3540     igtk_cmd.sta_id = cpu_to_le32(sta_id);
3541 
3542     if (remove_key) {
3543         /* This is a valid situation for IGTK */
3544         if (sta_id == IWL_MVM_INVALID_STA)
3545             return 0;
3546 
3547         igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3548     } else {
3549         struct ieee80211_key_seq seq;
3550         const u8 *pn;
3551 
3552         switch (keyconf->cipher) {
3553         case WLAN_CIPHER_SUITE_AES_CMAC:
3554             igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3555             break;
3556         case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3557         case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3558             igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3559             break;
3560         default:
3561             return -EINVAL;
3562         }
3563 
3564         memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3565         if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3566             igtk_cmd.ctrl_flags |=
3567                 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3568         ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3569         pn = seq.aes_cmac.pn;
3570         igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3571                                ((u64) pn[4] << 8) |
3572                                ((u64) pn[3] << 16) |
3573                                ((u64) pn[2] << 24) |
3574                                ((u64) pn[1] << 32) |
3575                                ((u64) pn[0] << 40));
3576     }
3577 
3578     IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3579                remove_key ? "removing" : "installing",
3580                keyconf->keyidx >= 6 ? "B" : "",
3581                keyconf->keyidx, igtk_cmd.sta_id);
3582 
3583     if (!iwl_mvm_has_new_rx_api(mvm)) {
3584         struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3585             .ctrl_flags = igtk_cmd.ctrl_flags,
3586             .key_id = igtk_cmd.key_id,
3587             .sta_id = igtk_cmd.sta_id,
3588             .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3589         };
3590 
3591         memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3592                ARRAY_SIZE(igtk_cmd_v1.igtk));
3593         return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3594                         sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3595     }
3596     return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3597                     sizeof(igtk_cmd), &igtk_cmd);
3598 }
3599 
3600 
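/*
 * Return the MAC address to use for TKIP phase-1 key derivation: the
 * station's own address when one is given, otherwise (for GTKs on a
 * station interface) the address of the AP station.
 */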
3601 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3602                        struct ieee80211_vif *vif,
3603                        struct ieee80211_sta *sta)
3604 {
3605     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3606 
3607     if (sta)
3608         return sta->addr;
3609 
3610     if (vif->type == NL80211_IFTYPE_STATION &&
3611         mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3612         u8 sta_id = mvmvif->ap_sta_id;
3613         sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3614                         lockdep_is_held(&mvm->mutex));
3615         return sta->addr;
3616     }
3617 
3618 
3619     return NULL;
3620 }
3621 
3622 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3623                  struct ieee80211_vif *vif,
3624                  struct ieee80211_sta *sta,
3625                  struct ieee80211_key_conf *keyconf,
3626                  u8 key_offset,
3627                  bool mcast)
3628 {
3629     const u8 *addr;
3630     struct ieee80211_key_seq seq;
3631     u16 p1k[5];
3632     u32 sta_id;
3633     bool mfp = false;
3634 
3635     if (sta) {
3636         struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3637 
3638         sta_id = mvm_sta->sta_id;
3639         mfp = sta->mfp;
3640     } else if (vif->type == NL80211_IFTYPE_AP &&
3641            !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3642         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3643 
3644         sta_id = mvmvif->mcast_sta.sta_id;
3645     } else {
3646         IWL_ERR(mvm, "Failed to find station id\n");
3647         return -EINVAL;
3648     }
3649 
3650     if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3651         addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3652         /* get phase 1 key from mac80211 */
3653         ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3654         ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3655 
3656         return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3657                         seq.tkip.iv32, p1k, 0, key_offset,
3658                         mfp);
3659     }
3660 
3661     return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3662                     0, NULL, 0, key_offset, mfp);
3663 }
3664 
3665 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3666             struct ieee80211_vif *vif,
3667             struct ieee80211_sta *sta,
3668             struct ieee80211_key_conf *keyconf,
3669             u8 key_offset)
3670 {
3671     bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3672     struct iwl_mvm_sta *mvm_sta;
3673     u8 sta_id = IWL_MVM_INVALID_STA;
3674     int ret;
3675     static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3676 
3677     lockdep_assert_held(&mvm->mutex);
3678 
3679     if (vif->type != NL80211_IFTYPE_AP ||
3680         keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3681         /* Get the station id from the mvm local station table */
3682         mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3683         if (!mvm_sta) {
3684             IWL_ERR(mvm, "Failed to find station\n");
3685             return -EINVAL;
3686         }
3687         sta_id = mvm_sta->sta_id;
3688 
3689         /*
3690          * It is possible that the 'sta' parameter is NULL, and thus
3691          * there is a need to retrieve the sta from the local station
3692          * table.
3693          */
3694         if (!sta) {
3695             sta = rcu_dereference_protected(
3696                 mvm->fw_id_to_mac_id[sta_id],
3697                 lockdep_is_held(&mvm->mutex));
3698             if (IS_ERR_OR_NULL(sta)) {
3699                 IWL_ERR(mvm, "Invalid station id\n");
3700                 return -EINVAL;
3701             }
3702         }
3703 
3704         if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3705             return -EINVAL;
3706     } else {
3707         struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3708 
3709         sta_id = mvmvif->mcast_sta.sta_id;
3710     }
3711 
3712     if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3713         keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3714         keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3715         ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3716         goto end;
3717     }
3718 
3719     /* If the key_offset is not pre-assigned, we need to find a
3720      * new offset to use.  In normal cases, the offset is not
3721      * pre-assigned, but during HW_RESTART we want to reuse the
3722      * same indices, so we pass them when this function is called.
3723      *
3724      * In D3 entry, we need to hardcode the indices (because the
3725      * firmware hardcodes the PTK offset to 0).  In this case, we
3726      * need to make sure we don't overwrite the hw_key_idx in the
3727      * keyconf structure, because otherwise we cannot configure
3728      * the original ones back when resuming.
3729      */
3730     if (key_offset == STA_KEY_IDX_INVALID) {
3731         key_offset  = iwl_mvm_set_fw_key_idx(mvm);
3732         if (key_offset == STA_KEY_IDX_INVALID)
3733             return -ENOSPC;
3734         keyconf->hw_key_idx = key_offset;
3735     }
3736 
3737     ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3738     if (ret)
3739         goto end;
3740 
3741     /*
3742      * For WEP, the same key is used for multicast and unicast. Upload it
3743      * again, using the same key offset, and now pointing the other one
3744      * to the same key slot (offset).
3745      * If this fails, remove the original as well.
3746      */
3747     if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3748          keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3749         sta) {
3750         ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3751                         key_offset, !mcast);
3752         if (ret) {
3753             __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3754             goto end;
3755         }
3756     }
3757 
3758     __set_bit(key_offset, mvm->fw_key_table);
3759 
3760 end:
3761     IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3762               keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3763               sta ? sta->addr : zero_addr, ret);
3764     return ret;
3765 }
3766 
3767 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3768                struct ieee80211_vif *vif,
3769                struct ieee80211_sta *sta,
3770                struct ieee80211_key_conf *keyconf)
3771 {
3772     bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3773     struct iwl_mvm_sta *mvm_sta;
3774     u8 sta_id = IWL_MVM_INVALID_STA;
3775     int ret, i;
3776 
3777     lockdep_assert_held(&mvm->mutex);
3778 
3779     /* Get the station from the mvm local station table */
3780     mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3781     if (mvm_sta)
3782         sta_id = mvm_sta->sta_id;
3783     else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3784         sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3785 
3786 
3787     IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3788               keyconf->keyidx, sta_id);
3789 
3790     if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3791         keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3792         keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3793         return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3794 
3795     if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3796         IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3797             keyconf->hw_key_idx);
3798         return -ENOENT;
3799     }
3800 
3801     /* track which key was deleted last */
3802     for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3803         if (mvm->fw_key_deleted[i] < U8_MAX)
3804             mvm->fw_key_deleted[i]++;
3805     }
3806     mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3807 
3808     if (sta && !mvm_sta) {
3809         IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3810         return 0;
3811     }
3812 
3813     ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3814     if (ret)
3815         return ret;
3816 
3817     /* delete WEP key twice to get rid of (now useless) offset */
3818     if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3819         keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3820         ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3821 
3822     return ret;
3823 }
3824 
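/*
 * Push a new TKIP phase-1 key to the firmware when the IV32 rolls
 * over; the key command is sent with CMD_ASYNC since this may be
 * called from a context that cannot sleep.
 */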
3825 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3826                  struct ieee80211_vif *vif,
3827                  struct ieee80211_key_conf *keyconf,
3828                  struct ieee80211_sta *sta, u32 iv32,
3829                  u16 *phase1key)
3830 {
3831     struct iwl_mvm_sta *mvm_sta;
3832     bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3833     bool mfp = sta ? sta->mfp : false;
3834 
3835     rcu_read_lock();
3836 
3837     mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3838     if (WARN_ON_ONCE(!mvm_sta))
3839         goto unlock;
3840     iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3841                  iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3842                  mfp);
3843 
3844  unlock:
3845     rcu_read_unlock();
3846 }
3847 
3848 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3849                 struct ieee80211_sta *sta)
3850 {
3851     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3852     struct iwl_mvm_add_sta_cmd cmd = {
3853         .add_modify = STA_MODE_MODIFY,
3854         .sta_id = mvmsta->sta_id,
3855         .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3856         .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3857     };
3858     int ret;
3859 
3860     ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3861                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3862     if (ret)
3863         IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3864 }
3865 
3866 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3867                        struct ieee80211_sta *sta,
3868                        enum ieee80211_frame_release_type reason,
3869                        u16 cnt, u16 tids, bool more_data,
3870                        bool single_sta_queue)
3871 {
3872     struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3873     struct iwl_mvm_add_sta_cmd cmd = {
3874         .add_modify = STA_MODE_MODIFY,
3875         .sta_id = mvmsta->sta_id,
3876         .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3877         .sleep_tx_count = cpu_to_le16(cnt),
3878         .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3879     };
3880     int tid, ret;
3881     unsigned long _tids = tids;
3882 
3883     /* convert TIDs to ACs - we don't support TSPEC so that's OK
3884      * Note that this field is reserved and unused by firmware not
3885      * supporting GO uAPSD, so it's safe to always do this.
3886      */
3887     for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3888         cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3889 
3890     /* If we're releasing frames from aggregation or dqa queues then check
3891      * if all the queues that we're releasing frames from, combined, have:
3892      *  - more frames than the service period, in which case more_data
3893      *    needs to be set
3894      *  - fewer than 'cnt' frames, in which case we need to adjust the
3895      *    firmware command (but do that unconditionally)
3896      */
3897     if (single_sta_queue) {
3898         int remaining = cnt;
3899         int sleep_tx_count;
3900 
3901         spin_lock_bh(&mvmsta->lock);
3902         for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3903             struct iwl_mvm_tid_data *tid_data;
3904             u16 n_queued;
3905 
3906             tid_data = &mvmsta->tid_data[tid];
3907 
3908             n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3909             if (n_queued > remaining) {
3910                 more_data = true;
3911                 remaining = 0;
3912                 break;
3913             }
3914             remaining -= n_queued;
3915         }
3916         sleep_tx_count = cnt - remaining;
3917         if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3918             mvmsta->sleep_tx_count = sleep_tx_count;
3919         spin_unlock_bh(&mvmsta->lock);
3920 
3921         cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3922         if (WARN_ON(cnt - remaining == 0)) {
3923             ieee80211_sta_eosp(sta);
3924             return;
3925         }
3926     }
3927 
3928     /* Note: this is ignored by firmware not supporting GO uAPSD */
3929     if (more_data)
3930         cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3931 
3932     if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3933         mvmsta->next_status_eosp = true;
3934         cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3935     } else {
3936         cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3937     }
3938 
3939     /* block the Tx queues until the FW has updated the sleep Tx count */
3940     iwl_trans_block_txq_ptrs(mvm->trans, true);
3941 
3942     ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3943                    CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3944                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3945     if (ret)
3946         IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3947 }
3948 
3949 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3950                struct iwl_rx_cmd_buffer *rxb)
3951 {
3952     struct iwl_rx_packet *pkt = rxb_addr(rxb);
3953     struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3954     struct ieee80211_sta *sta;
3955     u32 sta_id = le32_to_cpu(notif->sta_id);
3956 
3957     if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3958         return;
3959 
3960     rcu_read_lock();
3961     sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3962     if (!IS_ERR_OR_NULL(sta))
3963         ieee80211_sta_eosp(sta);
3964     rcu_read_unlock();
3965 }
3966 
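/* Tell the firmware to stop or resume transmitting to this station */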
3967 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3968                    struct iwl_mvm_sta *mvmsta, bool disable)
3969 {
3970     struct iwl_mvm_add_sta_cmd cmd = {
3971         .add_modify = STA_MODE_MODIFY,
3972         .sta_id = mvmsta->sta_id,
3973         .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3974         .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3975         .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3976     };
3977     int ret;
3978 
3979     ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3980                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3981     if (ret)
3982         IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3983 }
3984 
3985 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3986                       struct ieee80211_sta *sta,
3987                       bool disable)
3988 {
3989     struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3990 
3991     spin_lock_bh(&mvm_sta->lock);
3992 
3993     if (mvm_sta->disable_tx == disable) {
3994         spin_unlock_bh(&mvm_sta->lock);
3995         return;
3996     }
3997 
3998     mvm_sta->disable_tx = disable;
3999 
4000     /*
4001      * If sta PS state is handled by mac80211, tell it to start/stop
4002      * queuing tx for this station.
4003      */
4004     if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4005         ieee80211_sta_block_awake(mvm->hw, sta, disable);
4006 
4007     iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4008 
4009     spin_unlock_bh(&mvm_sta->lock);
4010 }
4011 
4012 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4013                           struct iwl_mvm_vif *mvmvif,
4014                           struct iwl_mvm_int_sta *sta,
4015                           bool disable)
4016 {
4017     u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4018     struct iwl_mvm_add_sta_cmd cmd = {
4019         .add_modify = STA_MODE_MODIFY,
4020         .sta_id = sta->sta_id,
4021         .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4022         .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4023         .mac_id_n_color = cpu_to_le32(id),
4024     };
4025     int ret;
4026 
4027     ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4028                    iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4029     if (ret)
4030         IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4031 }
4032 
4033 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4034                        struct iwl_mvm_vif *mvmvif,
4035                        bool disable)
4036 {
4037     struct ieee80211_sta *sta;
4038     struct iwl_mvm_sta *mvm_sta;
4039     int i;
4040 
4041     rcu_read_lock();
4042 
4043     /* Block/unblock all the stations of the given mvmvif */
4044     for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4045         sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4046         if (IS_ERR_OR_NULL(sta))
4047             continue;
4048 
4049         mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4050         if (mvm_sta->mac_id_n_color !=
4051             FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4052             continue;
4053 
4054         iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4055     }
4056 
4057     rcu_read_unlock();
4058 
4059     if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4060         return;
4061 
4062     /* The multicast station also needs to be blocked/unblocked */
4063     if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4064         iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4065                           &mvmvif->mcast_sta, disable);
4066 
4067     /*
4068      * Only unblock the broadcast station (FW blocks it for immediate
4069      * quiet, not the driver)
4070      */
4071     if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4072         iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4073                           &mvmvif->bcast_sta, disable);
4074 }
4075 
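/* Block Tx to the AP station while it is absent during a channel switch */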
4076 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4077 {
4078     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4079     struct iwl_mvm_sta *mvmsta;
4080 
4081     rcu_read_lock();
4082 
4083     mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4084 
4085     if (mvmsta)
4086         iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4087 
4088     rcu_read_unlock();
4089 }
4090 
4091 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4092 {
4093     u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4094 
4095     /*
4096      * In 22000 HW, the next_reclaimed index is only 8 bits wide, so we
4097      * need to normalize the SSN's wraparound before comparing the values.
4098      */
4099     if (mvm->trans->trans_cfg->gen2)
4100         sn &= 0xff;
4101 
4102     return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4103 }
4104 
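/*
 * Add an internal station for PASN (pre-association security
 * negotiation, used e.g. for secure ranging) and install its key.
 */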
4105 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4106              struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4107              u8 *key, u32 key_len)
4108 {
4109     int ret;
4110     u16 queue;
4111     struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4112     struct ieee80211_key_conf *keyconf;
4113 
4114     ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4115                        NL80211_IFTYPE_UNSPECIFIED,
4116                        IWL_STA_LINK);
4117     if (ret)
4118         return ret;
4119 
4120     ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4121                          addr, sta, &queue,
4122                          IWL_MVM_TX_FIFO_BE);
4123     if (ret)
4124         goto out;
4125 
4126     keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4127     if (!keyconf) {
4128         ret = -ENOBUFS;
4129         goto out;
4130     }
4131 
4132     keyconf->cipher = cipher;
4133     memcpy(keyconf->key, key, key_len);
4134     keyconf->keylen = key_len;
4135 
4136     ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4137                    0, NULL, 0, 0, true);
4138     kfree(keyconf);
4139     /* don't swallow a key-install failure; fall through to clean up */
         if (!ret)
             return 0;
4140 out:
4141     iwl_mvm_dealloc_int_sta(mvm, sta);
4142     return ret;
4143 }
4144 
4145 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4146                    struct ieee80211_vif *vif,
4147                    u32 mac_id)
4148 {
4149     struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4150         .mac_id = cpu_to_le32(mac_id),
4151     };
4152     int ret;
4153 
4154     ret = iwl_mvm_send_cmd_pdu(mvm,
4155                    WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4156                    CMD_ASYNC,
4157                    sizeof(cancel_channel_switch_cmd),
4158                    &cancel_channel_switch_cmd);
4159     if (ret)
4160         IWL_ERR(mvm, "Failed to cancel the channel switch\n");
4161 }