/*
 * Atheros CARL9170 driver
 *
 * 802.11 xmit & status routines
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"

static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
						unsigned int queue)
{
	if (unlikely(modparam_noht)) {
		return queue;
	} else {
		/*
		 * This is just another workaround, until
		 * someone figures out how to get QoS and
		 * AMPDU to play together!
		 */
		return 2;		/* AC_BE */
	}
}

static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
					      struct sk_buff *skb)
{
	return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
}

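/*
 * Firmware packet memory is handed out in mem_block_size chunks.
 * Consider the device "full" as soon as a worst-case (maximum
 * sized) frame would no longer fit into the free blocks.
 */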
static bool is_mem_full(struct ar9170 *ar)
{
	return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
		atomic_read(&ar->mem_free_blocks));
}

static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue, i;
	bool mem_full;

	atomic_inc(&ar->tx_total_queued);

	queue = skb_get_queue_mapping(skb);
	spin_lock_bh(&ar->tx_stats_lock);

	/*
	 * The driver has to accept the frame, regardless if the queue is
	 * full to the brim, or not. We have to do the queuing internally,
	 * since mac80211 assumes that a driver which can aggregate frames
	 * does not reject frames for this reason.
	 */
	ar->tx_stats[queue].len++;
	ar->tx_stats[queue].count++;

	mem_full = is_mem_full(ar);
	for (i = 0; i < ar->hw->queues; i++) {
		if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
			ieee80211_stop_queue(ar->hw, i);
			ar->queue_stop_timeout[i] = jiffies;
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);
}

/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
						   struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_vif *vif;
	unsigned int vif_id;

	vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S;

	if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
		return NULL;

	vif = rcu_dereference(ar->vif_priv[vif_id].vif);
	if (unlikely(!vif))
		return NULL;

	/*
	 * Normally we should use wrappers like ieee80211_get_DA to get
	 * the correct pointer regardless of the frame type. But every
	 * frame this driver transmits carries the receiver address in
	 * addr1, so a direct lookup works just as well.
	 */
	return ieee80211_find_sta(vif, hdr->addr1);
}

static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	sta_info = (struct carl9170_sta_info *) sta->drv_priv;
	if (atomic_dec_return(&sta_info->pending_frames) == 0)
		ieee80211_sta_block_awake(ar->hw, sta, false);

out_rcu:
	rcu_read_unlock();
}

static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue;

	queue = skb_get_queue_mapping(skb);

	spin_lock_bh(&ar->tx_stats_lock);

	ar->tx_stats[queue].len--;

	if (!is_mem_full(ar)) {
		unsigned int i;
		for (i = 0; i < ar->hw->queues; i++) {
			if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
				continue;

			if (ieee80211_queue_stopped(ar->hw, i)) {
				unsigned long tmp;

				tmp = jiffies - ar->queue_stop_timeout[i];
				if (tmp > ar->max_queue_stop_timeout[i])
					ar->max_queue_stop_timeout[i] = tmp;
			}

			ieee80211_wake_queue(ar->hw, i);
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);

	if (atomic_dec_and_test(&ar->tx_total_queued))
		complete(&ar->tx_flush);
}

static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	unsigned int chunks;
	int cookie = -1;

	atomic_inc(&ar->mem_allocs);

	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}

	spin_lock_bh(&ar->mem_lock);
	cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
	spin_unlock_bh(&ar->mem_lock);

	if (unlikely(cookie < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}
	super = (void *) skb->data;

	/*
	 * Cookie #0 serves two special purposes:
	 *  1. The firmware might use it to generate BlockACK frames
	 *     in some unforeseen situations.
	 *  2. Prevent double-free bugs.
	 */
	super->s.cookie = (u8) cookie + 1;
	return 0;
}

static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	int cookie;

	/* make a local copy of the cookie */
	cookie = super->s.cookie;
	/* invalidate cookie */
	super->s.cookie = 0;

	/*
	 * Do an out-of-bounds check on the cookie:
	 *
	 *  * cookie "0" is reserved and won't be assigned to any
	 *    out-going frame. Internally however, it is used to
	 *    mark no longer/un-accounted frames and serves as a
	 *    cheap way of preventing frames from being freed
	 *    twice by _accident_.
	 *
	 *  * obviously, the cookie number is limited by the amount
	 *    of available memory blocks, so the number can
	 *    never exceed the mem_blocks count.
	 */
	if (WARN_ON_ONCE(cookie == 0) ||
	    WARN_ON_ONCE(cookie > ar->fw.mem_blocks))
		return;

	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
		   &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
	spin_unlock_bh(&ar->mem_lock);
}

static void carl9170_tx_release(struct kref *ref)
{
	struct ar9170 *ar;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_tx_info *txinfo;
	struct sk_buff *skb;

	arinfo = container_of(ref, struct carl9170_tx_info, ref);
	txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
			      rate_driver_data);
	skb = container_of((void *) txinfo, struct sk_buff, cb);

	ar = arinfo->ar;
	if (WARN_ON_ONCE(!ar))
		return;

	/*
	 * The driver-private tx state is no longer needed. Wipe the
	 * status area (everything after the rates) so mac80211 gets
	 * a clean report.
	 */
	memset_after(&txinfo->status, 0, rates);

	if (atomic_read(&ar->tx_total_queued))
		ar->tx_schedule = true;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
		if (!atomic_read(&ar->tx_ampdu_upload))
			ar->tx_ampdu_schedule = true;

		if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
			struct _carl9170_tx_superframe *super;

			super = (void *)skb->data;
			txinfo->status.ampdu_len = super->s.rix;
			txinfo->status.ampdu_ack_len = super->s.cnt;
		} else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
			   !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
			/*
			 * drop redundant tx_status reports:
			 *
			 * 1. ampdu_ack_len of the final tx_status does
			 *    include the feedback of this particular frame.
			 *
			 * 2. tx_status_irqsafe only queues a limited
			 *    number of frames; the queue can overflow.
			 *
			 * 3. minstrel_ht is picky, it only accepts
			 *    reports of frames with the TX_STATUS_AMPDU flag.
			 *
			 * 4. mac80211 is not particularly interested in
			 *    feedback either [CTL_REQ_TX_STATUS not set]
			 */
			ieee80211_free_txskb(ar->hw, skb);
			return;
		} else {
			/*
			 * Either the frame transmission has failed or
			 * mac80211 requested tx status.
			 */
		}
	}

	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}

void carl9170_tx_get_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;
	kref_get(&arinfo->ref);
}

int carl9170_tx_put_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;

	return kref_put(&arinfo->ref, carl9170_tx_release);
}

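/*
 * Advance the transmitter's BlockAck window: clear the bit of the
 * just-reported frame, then move the window start (bsn) up to the
 * oldest frame that is still awaiting status (or to snx, if none).
 */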
static void carl9170_tx_shift_bm(struct ar9170 *ar,
				 struct carl9170_sta_tid *tid_info, u16 seq)
{
	u16 off;

	off = SEQ_DIFF(seq, tid_info->bsn);

	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	/*
	 * Sanity check: the frame must have been added to the BAW
	 * bitmap when it was queued, so its bit has to be set here.
	 */
	WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

	off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	if (!bitmap_empty(tid_info->bitmap, off))
		off = find_first_bit(tid_info->bitmap, off);

	tid_info->bsn += off;
	tid_info->bsn &= 0x0fff;

	bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
			   off, CARL9170_BAW_BITS);
}

static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
	struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *tid_info;
	u8 tid;

	if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
	    txinfo->flags & IEEE80211_TX_CTL_INJECTED)
		return;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	tid = ieee80211_get_tid(hdr);

	sta_info = (void *) sta->drv_priv;
	tid_info = rcu_dereference(sta_info->agg[tid]);
	if (!tid_info)
		goto out_rcu;

	spin_lock_bh(&tid_info->lock);
	if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
		carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));

	if (sta_info->stats[tid].clear) {
		sta_info->stats[tid].clear = false;
		sta_info->stats[tid].req = false;
		sta_info->stats[tid].ampdu_len = 0;
		sta_info->stats[tid].ampdu_ack_len = 0;
	}

	sta_info->stats[tid].ampdu_len++;
	if (txinfo->status.rates[0].count == 1)
		sta_info->stats[tid].ampdu_ack_len++;

	if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
		sta_info->stats[tid].req = true;

	if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
		super->s.rix = sta_info->stats[tid].ampdu_len;
		super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
		txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
		if (sta_info->stats[tid].req)
			txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		sta_info->stats[tid].clear = true;
	}
	spin_unlock_bh(&tid_info->lock);

out_rcu:
	rcu_read_unlock();
}

static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
				   struct ieee80211_tx_info *tx_info)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_bar *bar = (void *) super->frame_data;

	/*
	 * Unlike all other frames, the status report for BARs does
	 * not directly come from the hardware as it is incapable of
	 * matching a BA to a previously sent BAR.
	 * Instead the RX-path will scan for incoming BAs and set the
	 * IEEE80211_TX_STAT_ACK if it finds one that was likely
	 * caused by a BAR from us.
	 */
	if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
		struct carl9170_bar_list_entry *entry;
		int queue = skb_get_queue_mapping(skb);

		rcu_read_lock();
		list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
			if (entry->skb == skb) {
				spin_lock_bh(&ar->bar_list_lock[queue]);
				list_del_rcu(&entry->list);
				spin_unlock_bh(&ar->bar_list_lock[queue]);
				kfree_rcu(entry, head);
				goto out;
			}
		}

		WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
		     queue, bar->ra, bar->ta, bar->control,
		     bar->start_seq_num);
out:
		rcu_read_unlock();
	}
}

void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			const bool success)
{
	struct ieee80211_tx_info *txinfo;

	carl9170_tx_accounting_free(ar, skb);

	txinfo = IEEE80211_SKB_CB(skb);

	carl9170_tx_bar_status(ar, skb, txinfo);

	if (success)
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
	else
		ar->tx_ack_failures++;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		carl9170_tx_status_process_ampdu(ar, skb, txinfo);

	carl9170_tx_ps_unblock(ar, skb);
	carl9170_tx_put_skb(skb);
}

void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);

	atomic_dec(&ar->tx_total_pending);

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		atomic_dec(&ar->tx_ampdu_upload);

	if (carl9170_tx_put_skb(skb))
		tasklet_hi_schedule(&ar->usb_tasklet);
}

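/*
 * Match a firmware status cookie to its superframe on the given
 * status queue; unlink the frame and release the device memory
 * that was reserved for it.
 */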
static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
					       struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct _carl9170_tx_superframe *txc = (void *) skb->data;

		if (txc->s.cookie != cookie)
			continue;

		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		carl9170_release_dev_space(ar, skb);
		return skb;
	}
	spin_unlock_bh(&queue->lock);

	return NULL;
}

static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
	unsigned int tries, struct ieee80211_tx_info *txinfo)
{
	unsigned int i;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (txinfo->status.rates[i].idx < 0)
			break;

		if (i == rix) {
			txinfo->status.rates[i].count = tries;
			i++;
			break;
		}
	}

	for (; i < IEEE80211_TX_MAX_RATES; i++) {
		txinfo->status.rates[i].idx = -1;
		txinfo->status.rates[i].count = 0;
	}
}

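/*
 * Inspect the oldest frame on every status queue: if one has been
 * waiting for its tx status for more than
 * CARL9170_QUEUE_STUCK_TIMEOUT, the device is assumed to be stuck.
 */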
static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	bool restart = false;

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);

		skb = skb_peek(&ar->tx_status[i]);

		if (!skb)
			goto next;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;
		if (time_is_before_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)))
			restart = true;

next:
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	if (restart) {
		/*
		 * At least one queue has been stuck for long enough.
		 * Give the device a kick and hope it gets back to
		 * work.
		 *
		 * possible reasons may include:
		 *  - frames got lost/corrupted (bad connection to the device)
		 *  - stalled rx processing/usb controller hiccups
		 *  - firmware errors/bugs
		 *  - every bug you can think of.
		 *  - all bugs you can't...
		 *  - ...
		 */
		carl9170_restart(ar, CARL9170_RR_STUCK_TX);
	}
}

static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
{
	struct carl9170_sta_tid *iter;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_sta *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
		if (iter->state < CARL9170_TID_STATE_IDLE)
			continue;

		spin_lock_bh(&iter->lock);
		skb = skb_peek(&iter->queue);
		if (!skb)
			goto unlock;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *)txinfo->rate_driver_data;
		if (time_is_after_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
			goto unlock;

		sta = iter->sta;
		if (WARN_ON(!sta))
			goto unlock;

		ieee80211_stop_tx_ba_session(sta, iter->tid);
unlock:
		spin_unlock_bh(&iter->lock);
	}
	rcu_read_unlock();
}

void carl9170_tx_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_janitor.work);
	if (!IS_STARTED(ar))
		return;

	ar->tx_janitor_last_run = jiffies;

	carl9170_check_queue_stop_timeout(ar);
	carl9170_tx_ampdu_timeout(ar);

	if (!atomic_read(&ar->tx_total_queued))
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

static void __carl9170_tx_process_status(struct ar9170 *ar,
	const uint8_t cookie, const uint8_t info)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	unsigned int r, t, q;
	bool success = true;

	q = ar9170_qmap(info & CARL9170_TX_STATUS_QUEUE);

	skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
	if (!skb) {
		/*
		 * We have lost the race to another thread.
		 */
		return;
	}

	txinfo = IEEE80211_SKB_CB(skb);

	if (!(info & CARL9170_TX_STATUS_SUCCESS))
		success = false;

	r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
	t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;

	carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
	carl9170_tx_status(ar, skb, success);
}

void carl9170_tx_process_status(struct ar9170 *ar,
				const struct carl9170_rsp *cmd)
{
	unsigned int i;

	for (i = 0; i < cmd->hdr.ext; i++) {
		if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
			print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
					     (void *) cmd, cmd->hdr.len + 4);
			break;
		}

		__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
					     cmd->_tx_status[i].info);
	}
}

static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
	unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
{
	struct ieee80211_rate *rate = NULL;
	u8 *txpower;
	unsigned int idx;

	idx = txrate->idx;
	*tpc = 0;
	*phyrate = 0;

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			/* +1 dBm for HT40 (tpc counts in 0.5 dBm steps) */
			*tpc += 2;

			if (info->band == NL80211_BAND_2GHZ)
				txpower = ar->power_2G_ht40;
			else
				txpower = ar->power_5G_ht40;
		} else {
			if (info->band == NL80211_BAND_2GHZ)
				txpower = ar->power_2G_ht20;
			else
				txpower = ar->power_5G_ht20;
		}

		*phyrate = txrate->idx;
		*tpc += txpower[idx & 7];
	} else {
		if (info->band == NL80211_BAND_2GHZ) {
			if (idx < 4)
				txpower = ar->power_2G_cck;
			else
				txpower = ar->power_2G_ofdm;
		} else {
			txpower = ar->power_5G_leg;
			idx += 4;
		}

		rate = &__carl9170_ratetable[idx];
		*tpc += txpower[(rate->hw_value & 0x30) >> 4];
		*phyrate = rate->hw_value & 0xf;
	}

	if (ar->eeprom.tx_mask == 1) {
		*chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
		    rate && rate->bitrate >= 360)
			*chains = AR9170_TX_PHY_TXCHAIN_1;
		else
			*chains = AR9170_TX_PHY_TXCHAIN_2;
	}

	*tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
}

static __le32 carl9170_tx_physet(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
	unsigned int power = 0, chains = 0, phyrate = 0;
	__le32 tmp;

	tmp = cpu_to_le32(0);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
			AR9170_TX_PHY_BW_S);

	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
			AR9170_TX_PHY_BW_S);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);

		/* heavy clip control */
		tmp |= cpu_to_le32((txrate->idx & 0x7) <<
			AR9170_TX_PHY_TX_HEAVY_CLIP_S);

		tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/*
		 * green-field preamble does not work:
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
		 */
	} else {
		if (info->band == NL80211_BAND_2GHZ) {
			if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
			else
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		} else {
			tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		}

		/*
		 * short preamble seems to be broken too:
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
		 */
	}
	carl9170_tx_rate_tpc_chains(ar, info, txrate,
				    &phyrate, &power, &chains);

	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
	return tmp;
}

static bool carl9170_tx_rts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate,
				  bool ampdu, bool multi)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
		if (ampdu)
			break;
		fallthrough;

	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
			break;
		fallthrough;

	case CARL9170_ERP_RTS:
		if (likely(!multi))
			return true;
		break;

	default:
		break;
	}

	return false;
}

static bool carl9170_tx_cts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
			break;
		fallthrough;

	case CARL9170_ERP_CTS:
		return true;

	default:
		break;
	}

	return false;
}

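/*
 * Ask mac80211's rate control for this frame's rate set. The result
 * ends up in info->control.rates and is translated into the
 * superframe format by carl9170_tx_apply_rateset() below.
 */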
static void carl9170_tx_get_rates(struct ar9170 *ar,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta,
				  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;

	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);

	info = IEEE80211_SKB_CB(skb);

	ieee80211_get_tx_rates(vif, sta, skb,
			       info->control.rates,
			       IEEE80211_TX_MAX_RATES);
}

static void carl9170_tx_apply_rateset(struct ar9170 *ar,
				      struct ieee80211_tx_info *sinfo,
				      struct sk_buff *skb)
{
	struct ieee80211_tx_rate *txrate;
	struct ieee80211_tx_info *info;
	struct _carl9170_tx_superframe *txc = (void *) skb->data;
	int i;
	bool ampdu;
	bool no_ack;

	info = IEEE80211_SKB_CB(skb);
	ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
	no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);

	/* Set the rate control probe flag for all (sub-) frames */
	info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	/* NOTE: For the first rate, the ERP & AMPDU flags are directly
	 * taken from mac_control. For all fallback rates, the firmware
	 * updates the mac_control flags from the rate info field.
	 */
	for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
		__le32 phy_set;

		txrate = &sinfo->control.rates[i];
		if (txrate->idx < 0)
			break;

		phy_set = carl9170_tx_physet(ar, info, txrate);
		if (i == 0) {
			__le16 mac_tmp = cpu_to_le16(0);

			/* first rate - part of the hw's frame header */
			txc->f.phy_control = phy_set;

			if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);

			if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
			else if (carl9170_tx_cts_check(ar, txrate))
				mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);

			txc->f.mac_control |= mac_tmp;
		} else {
			/* fallback rates are stored in the firmware's
			 * retry rate set array.
			 */
			txc->s.rr[i - 1] = phy_set;
		}

		SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
			txrate->count);

		if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);
		else if (carl9170_tx_cts_check(ar, txrate))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);

		if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
			txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
	}
}

static int carl9170_tx_prepare(struct ar9170 *ar,
			       struct ieee80211_sta *sta,
			       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct _carl9170_tx_superframe *txc;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;
	unsigned int hw_queue;
	__le16 mac_tmp;
	u16 len;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
	BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
		     CARL9170_TX_SUPERDESC_LEN);

	BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
		     AR9170_TX_HWDESC_LEN);

	BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
		     ((CARL9170_TX_SUPER_MISC_VIF_ID >>
		       CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));

	hw_queue = ar9170_qmap(carl9170_get_queue(ar, skb));

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);
	len = skb->len;

	/*
	 * Note: If the frame was sent through a monitor interface,
	 * the ieee80211_vif pointer can be NULL.
	 */
	if (likely(info->control.vif))
		cvif = (void *) info->control.vif->drv_priv;
	else
		cvif = NULL;

	txc = skb_push(skb, sizeof(*txc));
	memset(txc, 0, sizeof(*txc));

	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);

	if (likely(cvif))
		SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);

	if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;

	if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;

	mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
			      AR9170_TX_MAC_BACKOFF);
	mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
			       AR9170_TX_MAC_QOS);

	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	if (info->control.hw_key) {
		len += info->control.hw_key->icv_len;

		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
			break;
		default:
			WARN_ON(1);
			goto err_out;
		}
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		unsigned int density, factor;

		if (unlikely(!sta || !cvif))
			goto err_out;

		factor = min_t(unsigned int, 1u,
			       sta->deflink.ht_cap.ampdu_factor);
		density = sta->deflink.ht_cap.ampdu_density;

		if (density) {
			/*
			 * Watch out!
			 *
			 * Otus uses slightly different density values
			 * than mac80211; translate them here.
			 */
			density = max_t(unsigned int, density + 1, 7u);
		}

		SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
			txc->s.ampdu_settings, density);

		SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
			txc->s.ampdu_settings, factor);
	}

	txc->s.len = cpu_to_le16(skb->len);
	txc->f.length = cpu_to_le16(len + FCS_LEN);
	txc->f.mac_control = mac_tmp;

	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies;
	arinfo->ar = ar;
	kref_init(&arinfo->ref);
	return 0;

err_out:
	skb_pull(skb, sizeof(*txc));
	return -EINVAL;
}

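/*
 * Judging by the bit's name, AR9170_TX_MAC_IMM_BA makes the firmware
 * solicit an immediate BlockAck; carl9170_tx_ampdu() sets it on the
 * last frame of every aggregate.
 */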
static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;

	super = (void *) skb->data;
	super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
}

static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	int tmp;

	super = (void *) skb->data;

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
	      CARL9170_TX_SUPER_AMPDU_DENSITY_S;

	/*
	 * A-MPDU density and factor are only committed to the
	 * hardware when they differ from the currently programmed
	 * values; the firmware then picks the new settings up from
	 * this superframe.
	 */
	if (tmp != ar->current_density) {
		ar->current_density = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
	}

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
	      CARL9170_TX_SUPER_AMPDU_FACTOR_S;

	if (tmp != ar->current_factor) {
		ar->current_factor = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
	}
}

static void carl9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb, *first;
	struct ieee80211_tx_info *tx_info_first;
	unsigned int i = 0, done_ampdus = 0;
	u16 seq, queue, tmpssn;

	atomic_inc(&ar->tx_ampdu_scheduler);
	ar->tx_ampdu_schedule = false;

	if (atomic_read(&ar->tx_ampdu_upload))
		return;

	if (!ar->tx_ampdu_list_len)
		return;

	__skb_queue_head_init(&agg);

	rcu_read_lock();
	tid_info = rcu_dereference(ar->tx_ampdu_iter);
	if (WARN_ON_ONCE(!tid_info)) {
		rcu_read_unlock();
		return;
	}

retry:
	list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
		i++;

		if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
			continue;

		queue = TID_TO_WME_AC(tid_info->tid);

		spin_lock_bh(&tid_info->lock);
		if (tid_info->state != CARL9170_TID_STATE_XMIT)
			goto processed;

		tid_info->counter++;
		first = skb_peek(&tid_info->queue);
		tmpssn = carl9170_get_seq(first);
		seq = tid_info->snx;

		if (unlikely(tmpssn != seq)) {
			tid_info->state = CARL9170_TID_STATE_IDLE;

			goto processed;
		}

		tx_info_first = NULL;
		while ((skb = skb_peek(&tid_info->queue))) {
			/* the frame's sequence number must match snx exactly */
			if (unlikely(carl9170_get_seq(skb) != seq))
				break;

			/* don't upload more frames than the BA window allows */
			if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
			    (tid_info->max - 1)))
				break;

			if (!tx_info_first) {
				carl9170_tx_get_rates(ar, tid_info->vif,
						      tid_info->sta, first);
				tx_info_first = IEEE80211_SKB_CB(first);
			}

			carl9170_tx_apply_rateset(ar, tx_info_first, skb);

			atomic_inc(&ar->tx_ampdu_upload);
			tid_info->snx = seq = SEQ_NEXT(seq);
			__skb_unlink(skb, &tid_info->queue);

			__skb_queue_tail(&agg, skb);

			if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
				break;
		}

		if (skb_queue_empty(&tid_info->queue) ||
		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
		    tid_info->snx) {
			/* stop TID, if A-MPDU frames are still missing,
			 * or whenever the queue is empty.
			 */
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		done_ampdus++;

processed:
		spin_unlock_bh(&tid_info->lock);

		if (skb_queue_empty(&agg))
			continue;

		/* apply ampdu spacing & factor settings */
		carl9170_set_ampdu_params(ar, skb_peek(&agg));

		/* set aggregation push bit */
		carl9170_set_immba(ar, skb_peek_tail(&agg));

		spin_lock_bh(&ar->tx_pending[queue].lock);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_bh(&ar->tx_pending[queue].lock);
		ar->tx_schedule = true;
	}
	if ((done_ampdus++ == 0) && (i++ == 0))
		goto retry;

	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();
}

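/*
 * Dequeue the next pending frame, but only once its device memory
 * has been reserved; frames stay on the tx_pending queue while the
 * firmware is out of free memory blocks.
 */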
static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
					    struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);
	if (unlikely(!skb))
		goto err_unlock;

	if (carl9170_alloc_dev_space(ar, skb))
		goto err_unlock;

	__skb_unlink(skb, queue);
	spin_unlock_bh(&queue->lock);

	info = IEEE80211_SKB_CB(skb);
	arinfo = (void *) info->rate_driver_data;

	arinfo->timeout = jiffies;
	return skb;

err_unlock:
	spin_unlock_bh(&queue->lock);
	return NULL;
}

void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	uint8_t q = 0;

	ar->tx_dropped++;

	super = (void *)skb->data;
	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
		ar9170_qmap(carl9170_get_queue(ar, skb)));
	__carl9170_tx_process_status(ar, super->s.cookie, q);
}

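/*
 * If the destination station has gone to sleep after the frame was
 * queued, take it back out of the tx path and report it as filtered,
 * so mac80211 can retry it once the station wakes up.
 */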
static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct ieee80211_tx_info *tx_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (!sta)
		goto out_rcu;

	sta_info = (void *) sta->drv_priv;
	tx_info = IEEE80211_SKB_CB(skb);

	if (unlikely(sta_info->sleeping) &&
	    !(tx_info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
		rcu_read_unlock();

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			atomic_dec(&ar->tx_ampdu_upload);

		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		carl9170_release_dev_space(ar, skb);
		carl9170_tx_status(ar, skb, false);
		return true;
	}

out_rcu:
	rcu_read_unlock();
	return false;
}

static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_bar *bar = (void *) super->frame_data;

	if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
	    skb->len >= sizeof(struct ieee80211_bar)) {
		struct carl9170_bar_list_entry *entry;
		unsigned int queue = skb_get_queue_mapping(skb);

		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
		if (!WARN_ON_ONCE(!entry)) {
			entry->skb = skb;
			spin_lock_bh(&ar->bar_list_lock[queue]);
			list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
			spin_unlock_bh(&ar->bar_list_lock[queue]);
		}
	}
}

static void carl9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned int i, q;
	bool schedule_garbagecollector = false;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))
		return;

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
			if (unlikely(!skb))
				break;

			if (unlikely(carl9170_tx_ps_drop(ar, skb)))
				continue;

			carl9170_bar_check(ar, skb);

			atomic_inc(&ar->tx_total_pending);

			q = __carl9170_get_queue(ar, i);

			/*
			 * NB: tx_status[i] vs. tx_status[q],
			 * TODO: Move into pick_skb or alloc_dev_space.
			 */
			skb_queue_tail(&ar->tx_status[q], skb);

			/*
			 * increase ref count to "2".
			 * Ref counting is the easiest way to solve the
			 * race between the urb's completion routine:
			 *	carl9170_tx_callback
			 * and wlan tx status functions:
			 *	carl9170_tx_status/janitor.
			 */
			carl9170_tx_get_skb(skb);

			carl9170_usb_tx(ar, skb);
			schedule_garbagecollector = true;
		}
	}

	if (!schedule_garbagecollector)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

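/*
 * Insert an A-MPDU frame into its TID queue in sequence order.
 * Retransmitted frames can arrive out of order, so the proper slot
 * is found by walking the queue backwards.
 */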
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
	struct ieee80211_sta *sta, struct sk_buff *skb,
	struct ieee80211_tx_info *txinfo)
{
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *agg;
	struct sk_buff *iter;
	u16 tid, seq, qseq, off;
	bool run = false;

	tid = carl9170_get_tid(skb);
	seq = carl9170_get_seq(skb);
	sta_info = (void *) sta->drv_priv;

	rcu_read_lock();
	agg = rcu_dereference(sta_info->agg[tid]);

	if (!agg)
		goto err_unlock_rcu;

	spin_lock_bh(&agg->lock);
	if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
		goto err_unlock;

	/* check if the frame is (still) within the transmitter's BA window */
	if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	off = SEQ_DIFF(seq, agg->bsn);
	if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
		goto err_unlock;

	if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
		__skb_queue_tail(&agg->queue, skb);
		agg->hsn = seq;
		goto queued;
	}

	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = carl9170_get_seq(iter);

		if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	__skb_queue_head(&agg->queue, skb);
queued:

	if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
			agg->state = CARL9170_TID_STATE_XMIT;
			run = true;
		}
	}

	spin_unlock_bh(&agg->lock);
	rcu_read_unlock();

	return run;

err_unlock:
	spin_unlock_bh(&agg->lock);

err_unlock_rcu:
	rcu_read_unlock();
	txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
	carl9170_tx_status(ar, skb, false);
	ar->tx_dropped++;
	return false;
}

void carl9170_op_tx(struct ieee80211_hw *hw,
		    struct ieee80211_tx_control *control,
		    struct sk_buff *skb)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_vif *vif;
	bool run;

	if (unlikely(!IS_STARTED(ar)))
		goto err_free;

	info = IEEE80211_SKB_CB(skb);
	vif = info->control.vif;

	if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
		goto err_free;

	carl9170_tx_accounting(ar, skb);
	/*
	 * From now on, one has to use carl9170_tx_status to free
	 * all resources which are associated with the frame.
	 */

	if (sta) {
		struct carl9170_sta_info *stai = (void *) sta->drv_priv;
		atomic_inc(&stai->pending_frames);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		/* to static code analyzers and reviewers:
		 * mac80211 guarantees that a valid "sta"
		 * reference is present, if a frame is part
		 * of an ampdu. Hence, any extra code guards
		 * are not necessary.
		 */
		run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
		if (run)
			carl9170_tx_ampdu(ar);

	} else {
		unsigned int queue = skb_get_queue_mapping(skb);

		carl9170_tx_get_rates(ar, vif, sta, skb);
		carl9170_tx_apply_rateset(ar, info, skb);
		skb_queue_tail(&ar->tx_pending[queue], skb);
	}

	carl9170_tx(ar);
	return;

err_free:
	ar->tx_dropped++;
	ieee80211_free_txskb(ar->hw, skb);
}

void carl9170_tx_scheduler(struct ar9170 *ar)
{
	/* TODO: we could do much more clever things here ... */
	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
		carl9170_tx(ar);
}

static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	int i = 1;

	/*
	 * The hardware can only send one beacon per interval. When
	 * several beaconing interfaces are active, they take turns:
	 * cycle through the vif list in round-robin fashion, starting
	 * right after the interface that was served last.
	 */
	cvif = rcu_dereference(ar->beacon_iter);
	if (ar->vifs > 0 && cvif) {
		do {
			list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
							 list) {
				if (cvif->active && cvif->enable_beacon)
					goto out;
			}
		} while (ar->beacon_enabled && i--);

		/* no entry found in list */
		return NULL;
	}

out:
	RCU_INIT_POINTER(ar->beacon_iter, cvif);
	return cvif;
}

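/*
 * Build the PHY settings (HT1 and PLCP/HT2 words) for the beacon.
 * Returns true for an HT rate, in which case the caller writes the
 * HT2 register instead of the legacy PLCP one.
 */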
static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
				      u32 *ht1, u32 *plcp)
{
	struct ieee80211_tx_info *txinfo;
	struct ieee80211_tx_rate *rate;
	unsigned int power, chains;
	bool ht_rate;

	txinfo = IEEE80211_SKB_CB(skb);
	rate = &txinfo->control.rates[0];
	ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
	carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);

	*ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
	if (chains == AR9170_TX_PHY_TXCHAIN_2)
		*ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
	SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
	SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
	SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);

	if (ht_rate) {
		*ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			*plcp |= AR9170_MAC_BCN_HT2_SGI;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
			*plcp |= AR9170_MAC_BCN_HT2_BW40;
		} else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
			*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
			*plcp |= AR9170_MAC_BCN_HT2_BW40;
		}

		SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
	} else {
		if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
			*plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
		else
			*plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
	}

	return ht_rate;
}

int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
	struct sk_buff *skb = NULL;
	struct carl9170_vif_info *cvif;
	__le32 *data, *old = NULL;
	u32 word, ht1, plcp, off, addr, len;
	int i = 0, err = 0;
	bool ht_rate;

	rcu_read_lock();
	cvif = carl9170_pick_beaconing_vif(ar);
	if (!cvif)
		goto out_unlock;

	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
				       NULL, NULL, 0);

	if (!skb) {
		err = -ENOMEM;
		goto err_free;
	}

	spin_lock_bh(&ar->beacon_lock);
	data = (__le32 *)skb->data;
	if (cvif->beacon)
		old = (__le32 *)cvif->beacon->data;

	off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
	addr = ar->fw.beacon_addr + off;
	len = roundup(skb->len + FCS_LEN, 4);

	if ((off + len) > ar->fw.beacon_max_len) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "beacon does not "
				  "fit into device memory!\n");
		}
		err = -EINVAL;
		goto err_unlock;
	}

	if (len > AR9170_MAC_BCN_LENGTH_MAX) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "no support for beacons "
				  "bigger than %d (yours:%d).\n",
				  AR9170_MAC_BCN_LENGTH_MAX, len);
		}

		err = -EMSGSIZE;
		goto err_unlock;
	}

	ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
	if (ht_rate)
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
	else
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);

	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
		/*
		 * XXX: This accesses beyond skb data for up
		 *	to the last 3 bytes!!
		 */

		if (old && (data[i] == old[i]))
			continue;

		word = le32_to_cpu(data[i]);
		carl9170_async_regwrite(addr + 4 * i, word);
	}
	carl9170_async_regwrite_finish();

	dev_kfree_skb_any(cvif->beacon);
	cvif->beacon = NULL;

	err = carl9170_async_regwrite_result();
	if (!err)
		cvif->beacon = skb;
	spin_unlock_bh(&ar->beacon_lock);
	if (err)
		goto err_free;

	if (submit) {
		err = carl9170_bcn_ctrl(ar, cvif->id,
					CARL9170_BCN_CTRL_CAB_TRIGGER,
					addr, skb->len + FCS_LEN);

		if (err)
			goto err_free;
	}
out_unlock:
	rcu_read_unlock();
	return 0;

err_unlock:
	spin_unlock_bh(&ar->beacon_lock);

err_free:
	rcu_read_unlock();
	dev_kfree_skb_any(skb);
	return err;
}