Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * HT handling
0004  *
0005  * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
0006  * Copyright 2002-2005, Instant802 Networks, Inc.
0007  * Copyright 2005-2006, Devicescape Software, Inc.
0008  * Copyright 2006-2007  Jiri Benc <jbenc@suse.cz>
0009  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
0010  * Copyright 2007-2010, Intel Corporation
0011  * Copyright(c) 2015-2017 Intel Deutschland GmbH
0012  * Copyright (C) 2018 - 2022 Intel Corporation
0013  */
0014 
0015 #include <linux/ieee80211.h>
0016 #include <linux/slab.h>
0017 #include <linux/export.h>
0018 #include <net/mac80211.h>
0019 #include "ieee80211_i.h"
0020 #include "driver-ops.h"
0021 #include "wme.h"
0022 
0023 /**
0024  * DOC: TX A-MPDU aggregation
0025  *
0026  * Aggregation on the TX side requires setting the hardware flag
0027  * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
0028  * packets with a flag indicating A-MPDU aggregation. The driver
0029  * or device is responsible for actually aggregating the frames,
0030  * as well as deciding how many and which to aggregate.
0031  *
0032  * When TX aggregation is started by some subsystem (usually the rate
0033  * control algorithm would be appropriate) by calling the
0034  * ieee80211_start_tx_ba_session() function, the driver will be
0035  * notified via its @ampdu_action function, with the
0036  * %IEEE80211_AMPDU_TX_START action.
0037  *
0038  * In response to that, the driver is later required to call the
0039  * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
0040  * start the aggregation session after the peer has also responded.
0041  * If the peer responds negatively, the session will be stopped
0042  * again right away. Note that it is possible for the aggregation
0043  * session to be stopped before the driver has indicated that it
0044  * is done setting it up, in which case it must not indicate the
0045  * setup completion.
0046  *
0047  * Also note that, since we also need to wait for a response from
0048  * the peer, the driver is notified of the completion of the
0049  * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
0050  * @ampdu_action callback.
0051  *
0052  * Similarly, when the aggregation session is stopped by the peer
0053  * or something calling ieee80211_stop_tx_ba_session(), the driver's
0054  * @ampdu_action function will be called with the action
0055  * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
0056  * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
0057  * Note that the sta can get destroyed before the BA tear down is
0058  * complete.
0059  */
0060 
/*
 * Build and transmit an ADDBA Request action frame to @da for @tid.
 *
 * The frame carries the dialog token, the capability field (policy bit,
 * TID, buffer size, A-MSDU support), the session timeout and the starting
 * sequence number. Allocation failure is silently ignored here; the
 * addBA response timer will eventually expire and tear the session down.
 */
static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
					 const u8 *da, u16 tid,
					 u8 dialog_token, u16 start_seq_num,
					 u16 agg_size, u16 timeout)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u16 capab;

	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);

	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 24-byte management frame header, zero-initialized */
	mgmt = skb_put_zero(skb, 24);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* the BSSID to use depends on the interface role */
	if (sdata->vif.type == NL80211_IFTYPE_AP ||
	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
	    sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
		memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
		memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
	else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);

	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	/* category byte plus the fixed-size ADDBA request body */
	skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));

	mgmt->u.action.category = WLAN_CATEGORY_BACK;
	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;

	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
	/* advertise A-MSDU-in-A-MPDU support and set the BA policy bit */
	capab = IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	capab |= IEEE80211_ADDBA_PARAM_POLICY_MASK;
	capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK);
	capab |= u16_encode_bits(agg_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);

	mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);

	mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
	/* the sequence number occupies the upper 12 bits of the SSC field */
	mgmt->u.action.u.addba_req.start_seq_num =
					cpu_to_le16(start_seq_num << 4);

	ieee80211_tx_skb_tid(sdata, skb, tid, -1);
}
0111 
0112 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
0113 {
0114     struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
0115     struct ieee80211_local *local = sdata->local;
0116     struct sk_buff *skb;
0117     struct ieee80211_bar *bar;
0118     u16 bar_control = 0;
0119 
0120     skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
0121     if (!skb)
0122         return;
0123 
0124     skb_reserve(skb, local->hw.extra_tx_headroom);
0125     bar = skb_put_zero(skb, sizeof(*bar));
0126     bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
0127                      IEEE80211_STYPE_BACK_REQ);
0128     memcpy(bar->ra, ra, ETH_ALEN);
0129     memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
0130     bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
0131     bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
0132     bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
0133     bar->control = cpu_to_le16(bar_control);
0134     bar->start_seq_num = cpu_to_le16(ssn);
0135 
0136     IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
0137                     IEEE80211_TX_CTL_REQ_TX_STATUS;
0138     ieee80211_tx_skb_tid(sdata, skb, tid, -1);
0139 }
0140 EXPORT_SYMBOL(ieee80211_send_bar);
0141 
/*
 * Publish (or, with @tid_tx == NULL, unpublish) the TX aggregation state
 * for @tid. Readers access the pointer under RCU; updates require both
 * the AMPDU mutex and the sta spinlock, as asserted below.
 */
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
}
0149 
0150 /*
0151  * When multiple aggregation sessions on multiple stations
0152  * are being created/destroyed simultaneously, we need to
0153  * refcount the global queue stop caused by that in order
0154  * to not get into a situation where one of the aggregation
0155  * setup or teardown re-enables queues before the other is
0156  * ready to handle that.
0157  *
0158  * These two functions take care of this issue by keeping
0159  * a global "agg_queue_stop" refcount.
0160  */
0161 static void __acquires(agg_queue)
0162 ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
0163 {
0164     int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
0165 
0166     /* we do refcounting here, so don't use the queue reason refcounting */
0167 
0168     if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
0169         ieee80211_stop_queue_by_reason(
0170             &sdata->local->hw, queue,
0171             IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
0172             false);
0173     __acquire(agg_queue);
0174 }
0175 
0176 static void __releases(agg_queue)
0177 ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
0178 {
0179     int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
0180 
0181     if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
0182         ieee80211_wake_queue_by_reason(
0183             &sdata->local->hw, queue,
0184             IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
0185             false);
0186     __release(agg_queue);
0187 }
0188 
0189 static void
0190 ieee80211_agg_stop_txq(struct sta_info *sta, int tid)
0191 {
0192     struct ieee80211_txq *txq = sta->sta.txq[tid];
0193     struct ieee80211_sub_if_data *sdata;
0194     struct fq *fq;
0195     struct txq_info *txqi;
0196 
0197     if (!txq)
0198         return;
0199 
0200     txqi = to_txq_info(txq);
0201     sdata = vif_to_sdata(txq->vif);
0202     fq = &sdata->local->fq;
0203 
0204     /* Lock here to protect against further seqno updates on dequeue */
0205     spin_lock_bh(&fq->lock);
0206     set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
0207     spin_unlock_bh(&fq->lock);
0208 }
0209 
0210 static void
0211 ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
0212 {
0213     struct ieee80211_txq *txq = sta->sta.txq[tid];
0214     struct txq_info *txqi;
0215 
0216     lockdep_assert_held(&sta->ampdu_mlme.mtx);
0217 
0218     if (!txq)
0219         return;
0220 
0221     txqi = to_txq_info(txq);
0222 
0223     if (enable)
0224         set_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
0225     else
0226         clear_bit(IEEE80211_TXQ_AMPDU, &txqi->flags);
0227 
0228     clear_bit(IEEE80211_TXQ_STOP, &txqi->flags);
0229     local_bh_disable();
0230     rcu_read_lock();
0231     schedule_and_wake_txq(sta->sdata->local, txqi);
0232     rcu_read_unlock();
0233     local_bh_enable();
0234 }
0235 
0236 /*
0237  * splice packets from the STA's pending to the local pending,
0238  * requires a call to ieee80211_agg_splice_finish later
0239  */
static void __acquires(agg_queue)
ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
			     struct tid_ampdu_tx *tid_tx, u16 tid)
{
	struct ieee80211_local *local = sdata->local;
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
	unsigned long flags;

	/* stop the hardware queue first so nothing new reaches the driver */
	ieee80211_stop_queue_agg(sdata, tid);

	/* the queue stays stopped even on this error path (see finish) */
	if (WARN(!tid_tx,
		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
		 tid))
		return;

	if (!skb_queue_empty(&tid_tx->pending)) {
		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		/* copy over remaining packets */
		skb_queue_splice_tail_init(&tid_tx->pending,
					   &local->pending[queue]);
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	}
}
0263 
/* counterpart to ieee80211_agg_splice_packets(): re-enable the queue */
static void __releases(agg_queue)
ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
{
	ieee80211_wake_queue_agg(sdata, tid);
}
0269 
/*
 * Final teardown of the TX aggregation state for @tid: splice pending
 * frames back onto the local pending queues, unpublish the tid_tx
 * struct and free it after an RCU grace period. Requires the AMPDU
 * mutex and the sta lock.
 */
static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	/* RCU readers may still hold a reference, so defer the free */
	kfree_rcu(tid_tx, rcu_head);
}
0298 
/*
 * Stop a TX BA session for @tid, notifying the driver with the
 * appropriate IEEE80211_AMPDU_TX_STOP_* action for @reason and clearing
 * the OPERATIONAL bit so the TX path starts queueing frames onto
 * tid_tx->pending. The final teardown happens later via the stop
 * callback — except for AGG_STOP_DESTROY_STA, where the state may stay
 * around until the station itself is destroyed (see comment at the end).
 *
 * Caller must hold the AMPDU mutex. Returns 0 on success, -EINVAL for an
 * unknown reason, -ENOENT if no session exists, -EALREADY if a stop is
 * already in progress (and we're not destroying the station).
 */
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	/* map the stop reason onto the driver-visible action */
	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* free struct pending for start, if present */
	tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
	kfree(tid_tx);
	sta->ampdu_mlme.tid_start_tx[tid] = NULL;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	/* timers must not fire once we're tearing the session down */
	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	if (!local->in_reconfig)
		synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}
0426 
0427 /*
0428  * After sending add Block Ack request we activated a timer until
0429  * add Block Ack response will arrive from the recipient.
0430  * If this timer expires sta_addba_resp_timer_expired will be executed.
0431  */
0432 static void sta_addba_resp_timer_expired(struct timer_list *t)
0433 {
0434     struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
0435     struct sta_info *sta = tid_tx->sta;
0436     u8 tid = tid_tx->tid;
0437 
0438     /* check if the TID waits for addBA response */
0439     if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
0440         ht_dbg(sta->sdata,
0441                "timer expired on %pM tid %d not expecting addBA response\n",
0442                sta->sta.addr, tid);
0443         return;
0444     }
0445 
0446     ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
0447            sta->sta.addr, tid);
0448 
0449     ieee80211_stop_tx_ba_session(&sta->sta, tid);
0450 }
0451 
0452 static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
0453                           struct tid_ampdu_tx *tid_tx)
0454 {
0455     struct ieee80211_sub_if_data *sdata = sta->sdata;
0456     struct ieee80211_local *local = sta->local;
0457     u8 tid = tid_tx->tid;
0458     u16 buf_size;
0459 
0460     /* activate the timer for the recipient's addBA response */
0461     mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
0462     ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
0463            sta->sta.addr, tid);
0464 
0465     spin_lock_bh(&sta->lock);
0466     sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
0467     sta->ampdu_mlme.addba_req_num[tid]++;
0468     spin_unlock_bh(&sta->lock);
0469 
0470     if (sta->sta.deflink.he_cap.has_he) {
0471         buf_size = local->hw.max_tx_aggregation_subframes;
0472     } else {
0473         /*
0474          * We really should use what the driver told us it will
0475          * transmit as the maximum, but certain APs (e.g. the
0476          * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
0477          * will crash when we use a lower number.
0478          */
0479         buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
0480     }
0481 
0482     /* send AddBA request */
0483     ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
0484                      tid_tx->dialog_token, tid_tx->ssn,
0485                      buf_size, tid_tx->timeout);
0486 
0487     WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
0488 }
0489 
/*
 * Second stage of TX aggregation setup, run from the aggregation work
 * item: quiesce the TX path, hand IEEE80211_AMPDU_TX_START to the driver
 * and — unless the driver deferred (DELAY_ADDBA) or refused — send the
 * ADDBA request to the peer. Caller holds the AMPDU mutex (this is
 * implied by the callers visible in this file; the WANT_START handling
 * relies on it).
 */
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	/* tid_seq stores the sequence number shifted left by 4 bits */
	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	/* the driver may have adjusted the starting sequence number */
	tid_tx->ssn = params.ssn;
	if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
		/* driver will call ieee80211_start_tx_ba_cb() later */
		return;
	} else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
		/*
		 * We didn't send the request yet, so don't need to check
		 * here if we already got a response, just mark as driver
		 * ready immediately.
		 */
		set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
	} else if (ret) {
		/* driver refused: unwind all the setup done so far */
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	ieee80211_send_addba_with_timeout(sta, tid_tx);
}
0554 
0555 /*
0556  * After accepting the AddBA Response we activated a timer,
0557  * resetting it after each frame that we send.
0558  */
0559 static void sta_tx_agg_session_timer_expired(struct timer_list *t)
0560 {
0561     struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
0562     struct sta_info *sta = tid_tx->sta;
0563     u8 tid = tid_tx->tid;
0564     unsigned long timeout;
0565 
0566     if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
0567         return;
0568     }
0569 
0570     timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
0571     if (time_is_after_jiffies(timeout)) {
0572         mod_timer(&tid_tx->session_timer, timeout);
0573         return;
0574     }
0575 
0576     ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
0577            sta->sta.addr, tid);
0578 
0579     ieee80211_stop_tx_ba_session(&sta->sta, tid);
0580 }
0581 
/*
 * Request a new TX BA session with @pubsta on @tid (public mac80211 API).
 *
 * Validates the request against hardware capabilities, interface type,
 * station flags and the per-TID retry bookkeeping, then allocates the
 * tid_tx state and queues the aggregation work item which performs the
 * actual handshake (see ieee80211_tx_ba_session_handle_start()).
 *
 * Returns 0 if the request was queued, -EINVAL on an invalid/forbidden
 * request, -EBUSY when retries are exhausted or being rate-limited,
 * -EAGAIN if a session already exists or is being set up, -ENOMEM on
 * allocation failure.
 */
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
	     "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	/* on 6 GHz the peer may lack legacy HT caps but still do BA */
	if (!pubsta->deflink.ht_cap.ht_supported &&
	    sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	/* hardware must support aggregation and not set it up by itself */
	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	/* only these interface roles participate in TX aggregation */
	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	if (test_sta_flag(sta, WLAN_STA_MFP) &&
	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
		ht_dbg(sdata,
		       "MFP STA not authorized - deny BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack session with HT STAs. This information
	 * is set when we receive a bss info from a probe response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.deflink.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;
	tid_tx->sta = sta;
	tid_tx->tid = tid;

	/* response timer */
	timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);

	/* tx timer */
	timer_setup(&tid_tx->session_timer,
		    sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
0729 
/*
 * Final stage of TX aggregation setup: the peer accepted and the driver
 * is ready. Notify the driver (IEEE80211_AMPDU_TX_OPERATIONAL), splice
 * the frames queued during setup back onto the pending queues and mark
 * the session OPERATIONAL so the TX path can go lock-free.
 */
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* buf_size/amsdu were negotiated from the peer's addBA response */
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	/* return value is not checked here */
	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	/* aggregation is enabled now; release the TXQ */
	ieee80211_agg_start_txq(sta, tid, true);
}
0772 
/*
 * Driver-ready callback for TX aggregation setup. If the ADDBA request
 * was deferred (driver returned TX_START_DELAY_ADDBA), send it now;
 * otherwise, if the peer's response has already arrived, move the
 * session to operational. Warns (and bails) on a duplicate callback.
 */
void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
			      struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		return;

	if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) {
		ieee80211_send_addba_with_timeout(sta, tid_tx);
		/* RESPONSE_RECEIVED state would trigger the flow again */
		return;
	}

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);
}
0791 
/*
 * Resolve (@ra, @tid) to the station and its TX aggregation state.
 * Must run under RCU read lock (the callers here hold it). On success
 * *@sta is filled in and the tid_tx pointer is returned; returns NULL
 * (with a warning when the session is unexpectedly missing) otherwise.
 */
static struct tid_ampdu_tx *
ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
			const u8 *ra, u16 tid, struct sta_info **sta)
{
	struct tid_ampdu_tx *tid_tx;

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return NULL;
	}

	*sta = sta_info_get_bss(sdata, ra);
	if (!*sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return NULL;
	}

	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);

	/* a driver calling back for a session that was never started */
	if (WARN_ON(!tid_tx))
		ht_dbg(sdata, "addBA was not requested!\n");

	return tid_tx;
}
0817 
0818 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
0819                       const u8 *ra, u16 tid)
0820 {
0821     struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
0822     struct ieee80211_local *local = sdata->local;
0823     struct sta_info *sta;
0824     struct tid_ampdu_tx *tid_tx;
0825 
0826     trace_api_start_tx_ba_cb(sdata, ra, tid);
0827 
0828     rcu_read_lock();
0829     tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
0830     if (!tid_tx)
0831         goto out;
0832 
0833     set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
0834     ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
0835  out:
0836     rcu_read_unlock();
0837 }
0838 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
0839 
0840 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
0841                    enum ieee80211_agg_stop_reason reason)
0842 {
0843     int ret;
0844 
0845     mutex_lock(&sta->ampdu_mlme.mtx);
0846 
0847     ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
0848 
0849     mutex_unlock(&sta->ampdu_mlme.mtx);
0850 
0851     return ret;
0852 }
0853 
0854 int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
0855 {
0856     struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
0857     struct ieee80211_sub_if_data *sdata = sta->sdata;
0858     struct ieee80211_local *local = sdata->local;
0859     struct tid_ampdu_tx *tid_tx;
0860     int ret = 0;
0861 
0862     trace_api_stop_tx_ba_session(pubsta, tid);
0863 
0864     if (!local->ops->ampdu_action)
0865         return -EINVAL;
0866 
0867     if (tid >= IEEE80211_NUM_TIDS)
0868         return -EINVAL;
0869 
0870     spin_lock_bh(&sta->lock);
0871     tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
0872 
0873     if (!tid_tx) {
0874         ret = -ENOENT;
0875         goto unlock;
0876     }
0877 
0878     WARN(sta->reserved_tid == tid,
0879          "Requested to stop BA session on reserved tid=%d", tid);
0880 
0881     if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
0882         /* already in progress stopping it */
0883         ret = 0;
0884         goto unlock;
0885     }
0886 
0887     set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
0888     ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
0889 
0890  unlock:
0891     spin_unlock_bh(&sta->lock);
0892     return ret;
0893 }
0894 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
0895 
0896 void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
0897                  struct tid_ampdu_tx *tid_tx)
0898 {
0899     struct ieee80211_sub_if_data *sdata = sta->sdata;
0900     bool send_delba = false;
0901     bool start_txq = false;
0902 
0903     ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
0904            sta->sta.addr, tid);
0905 
0906     spin_lock_bh(&sta->lock);
0907 
0908     if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
0909         ht_dbg(sdata,
0910                "unexpected callback to A-MPDU stop for %pM tid %d\n",
0911                sta->sta.addr, tid);
0912         goto unlock_sta;
0913     }
0914 
0915     if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
0916         send_delba = true;
0917 
0918     ieee80211_remove_tid_tx(sta, tid);
0919     start_txq = true;
0920 
0921  unlock_sta:
0922     spin_unlock_bh(&sta->lock);
0923 
0924     if (start_txq)
0925         ieee80211_agg_start_txq(sta, tid, false);
0926 
0927     if (send_delba)
0928         ieee80211_send_delba(sdata, sta->sta.addr, tid,
0929             WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
0930 }
0931 
0932 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
0933                      const u8 *ra, u16 tid)
0934 {
0935     struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
0936     struct ieee80211_local *local = sdata->local;
0937     struct sta_info *sta;
0938     struct tid_ampdu_tx *tid_tx;
0939 
0940     trace_api_stop_tx_ba_cb(sdata, ra, tid);
0941 
0942     rcu_read_lock();
0943     tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
0944     if (!tid_tx)
0945         goto out;
0946 
0947     set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
0948     ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
0949  out:
0950     rcu_read_unlock();
0951 }
0952 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
0953 
0954 
/*
 * ieee80211_process_addba_resp - handle a peer's ADDBA response frame
 *
 * Validates the response against our pending ADDBA request (TID and
 * dialog token), stops the response timer, and either moves the TX
 * aggregation session towards operational or tears it down if the
 * peer declined (non-success status or zero buffer size).
 *
 * NOTE(review): @len is unused here - presumably the frame length was
 * already validated by the caller; confirm at the call site.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_txq *txq;
	u16 capab, tid, buf_size;
	bool amsdu;

	/* extract TID, A-MSDU support and buffer size from the capab field */
	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK);
	buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK);
	/* clamp to what our hardware can aggregate */
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	/* peer doesn't accept A-MSDU within A-MPDU - disable it on this TXQ */
	txq = sta->sta.txq[tid];
	if (!amsdu && txq)
		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	/* the response must match the request we actually sent */
	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/* response arrived in time - cancel the addBA timeout timer */
	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		/*
		 * only go operational once the driver has finished its
		 * part of the setup (DRV_READY); otherwise the work item
		 * will do this when RESPONSE_RECEIVED is observed
		 */
		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		/* session is up - reset the addBA retry counter */
		sta->ampdu_mlme.addba_req_num[tid] = 0;

		/* peer-supplied session timeout (TUs); 0 means no timeout */
		tid_tx->timeout =
			le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		/* peer declined (or offered no buffer) - tear the session down */
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}