// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

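/* Record the sequence number of transmitted QoS data frames so a later
 * BlockAckReq for this TID can use an up-to-date starting SSN.
 */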
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

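/* mt76_tx_status_lock()/mt76_tx_status_unlock() bracket access to the tx
 * status tracking state.  Completed frames are collected on a caller
 * provided list under the lock and reported to mac80211 only after the
 * lock has been dropped.
 */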
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			status.rates = NULL;
			status.n_rates = 0;
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status_ext(hw, &status);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

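/* A frame is handed back to mac80211 only once both its DMA completion
 * and its tx status (TXS) event have been flagged in the skb control
 * block.
 */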
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable; if it failed, report the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

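/* Allocate a packet ID and store the skb in the per-wcid idr so a later
 * TXS event can be matched to it.  Frames that need no status tracking
 * get MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB instead.
 */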
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

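/* Remove and return the skb registered under @pktid.  While scanning the
 * idr, time out entries whose DMA completed more than
 * MT_TX_STATUS_SKB_TIMEOUT ago; a negative @pktid flushes all entries.
 */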
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* Look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						  MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE; time out this packet
		 * and stop waiting for the TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

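/* Frames without an airtime estimate are not covered by AQL; keep a
 * per-station count of them so the scheduler can cap the number that are
 * outstanding at any time.
 */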
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

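/* Common DMA completion handler: update non-AQL accounting, deal with
 * testmode frames, and either report the frame to mac80211 right away or
 * mark it DMA_DONE so it waits for its TXS event.
 */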
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		ieee80211_tx_status_ext(hw, &status);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

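/* Transmit path for frames that mac80211 hands to the driver directly
 * rather than through an ieee80211_txq: select the hardware queue, fill
 * in default rates if necessary and queue the frame.
 */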
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

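/* mac80211 callback for releasing PS-buffered frames: dequeue up to
 * @nframes frames from the requested TIDs, mark the last one as EOSP and
 * push them out through the MT_TXQ_PSD queue.
 */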
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

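/* Pull a burst of frames from a single txq and queue them to hardware.
 * The burst ends when the txq runs empty, the hardware queue fills up or
 * the per-station non-AQL packet limit is hit.
 */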
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

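/* Iterate over the txqs that mac80211 schedules for @qid: send a pending
 * BAR when aggregation state requires it, transmit one burst per txq and
 * return the number of frames queued (or a negative error).
 */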
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first skb of an A-MSDU burst keeps track of the whole burst
	 * length, so update its length in addition to padding the last
	 * fragment.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

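/* Token bookkeeping for the tx path: mt76_token_consume() allocates an
 * idr slot for a txwi and blocks transmission once the pool is nearly
 * exhausted; mt76_token_release() frees the slot and signals via @wake
 * when transmission can be unblocked again.
 */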
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);