// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "tx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static
void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct rtw_vif *rtwvif;

	hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	if (!is_broadcast_ether_addr(hdr->addr1) &&
	    !is_multicast_ether_addr(hdr->addr1)) {
		rtwdev->stats.tx_unicast += skb->len;
		rtwdev->stats.tx_cnt++;
		if (vif) {
			rtwvif = (struct rtw_vif *)vif->drv_priv;
			rtwvif->stats.tx_unicast += skb->len;
			rtwvif->stats.tx_cnt++;
		}
	}
}

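/* Translate a filled rtw_tx_pkt_info into the hardware TX descriptor that
 * sits at skb->data; the caller is expected to have reserved the descriptor
 * area in front of the frame.
 */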
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
{
	__le32 *txdesc = (__le32 *)skb->data;

	SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
	SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
	SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
	SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
	SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
	SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
	SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
	SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
	SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
	SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
	SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
	SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
	SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
	SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
	SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
	SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
	SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
	SET_TX_DESC_LS(txdesc, pkt_info->ls);
	SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
	SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
	SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
	SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
	if (pkt_info->rts) {
		SET_TX_DESC_RTSRATE(txdesc, DESC_RATE24M);
		SET_TX_DESC_DATA_RTS_SHORT(txdesc, 1);
	}
	SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
	SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
	SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
	SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
	SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
	if (pkt_info->tim_offset) {
		SET_TX_DESC_TIM_EN(txdesc, 1);
		SET_TX_DESC_TIM_OFFSET(txdesc, pkt_info->tim_offset);
	}
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);

static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
	u8 exp = sta->deflink.ht_cap.ampdu_factor;

	/* the least ampdu factor is 8K, and the value in the tx desc is the
	 * max aggregation num, which represents val * 2 packets can be
	 * aggregated in an AMPDU, so here we should use 8/2=4 as the base
	 */
	return (BIT(2) << exp) - 1;
}

static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
	return sta->deflink.ht_cap.ampdu_density;
}

static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
				 struct ieee80211_sta *sta)
{
	u8 rate;

	if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
		rate = DESC_RATEMCS15;
	else
		rate = DESC_RATEMCS7;

	return rate;
}

static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
				  struct ieee80211_sta *sta)
{
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 rate;
	u16 tx_mcs_map;

	tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
	if (efuse->hw_cap.nss == 1) {
		switch (tx_mcs_map & 0x3) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT1SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT1SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT1SS_MCS9;
			break;
		}
	} else if (efuse->hw_cap.nss >= 2) {
		switch ((tx_mcs_map & 0xc) >> 2) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT2SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT2SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT2SS_MCS9;
			break;
		}
	} else {
		rate = DESC_RATEVHT1SS_MCS9;
	}

	return rate;
}

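/* Ask the firmware for a TX (CCX) report on this frame by tagging it with a
 * driver-side sequence number, carried in the descriptor's SW_DEFINE field.
 */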
static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;

	/* [11:8], reserved, fills with zero
	 * [7:2], tx report sequence number
	 * [1:0], firmware use, fills with zero
	 */
	pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
	pkt_info->report = true;
}

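/* Purge timer for pending TX reports: if the firmware has not reported back
 * within RTW_TX_PROBE_TIMEOUT, warn and drop every skb still queued.
 */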
void rtw_tx_report_purge_timer(struct timer_list *t)
{
	struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;

	if (skb_queue_len(&tx_report->queue) == 0)
		return;

	rtw_warn(rtwdev, "failed to get tx report from firmware\n");

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_purge(&tx_report->queue);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

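/* Queue an skb that is waiting for a firmware TX report and (re)arm the
 * purge timer; the sequence number is stashed in the skb's driver data so
 * rtw_tx_report_handle() can match the report later.
 */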
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;
	u8 *drv_data;

	/* pass sn to tx report handler through driver data */
	drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
	*drv_data = sn;

	spin_lock_irqsave(&tx_report->q_lock, flags);
	__skb_queue_tail(&tx_report->queue, skb);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);

	mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
}
EXPORT_SYMBOL(rtw_tx_report_enqueue);

static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
				    struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}

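/* Handle a C2H TX report: extract the sequence number and status (V0 or V1
 * layout depending on the source), find the matching queued skb and complete
 * its TX status towards mac80211.
 */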
void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	struct rtw_c2h_cmd *c2h;
	struct sk_buff *cur, *tmp;
	unsigned long flags;
	u8 sn, st;
	u8 *n;

	c2h = get_c2h_from_skb(skb);

	if (src == C2H_CCX_TX_RPT) {
		sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V0(c2h->payload);
	} else {
		sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V1(c2h->payload);
	}

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
		n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
		if (*n == sn) {
			__skb_unlink(cur, &tx_report->queue);
			rtw_tx_report_tx_status(rtwdev, cur, st == 0);
			break;
		}
	}
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

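/* Pick the rate for a management frame: normally the lowest basic rate of
 * the vif, falling back to the band's lowest rate when the caller asks to
 * ignore basic rates or RTW_FLAG_FORCE_LOWEST_RATE is set.
 */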
static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u8 lowest_rate, bool ignore_rate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = tx_info->control.vif;
	bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);

	if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest)
		return lowest_rate;

	return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
}

static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct sk_buff *skb,
					bool ignore_rate)
{
	if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
		pkt_info->rate_id = RTW_RATEID_B_20M;
		pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M,
						   ignore_rate);
	} else {
		pkt_info->rate_id = RTW_RATEID_G;
		pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M,
						   ignore_rate);
	}

	pkt_info->use_rate = true;
	pkt_info->dis_rate_fallback = true;
}

static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev,
				       struct rtw_tx_pkt_info *pkt_info,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 sec_type = 0;

	if (info && info->control.hw_key) {
		struct ieee80211_key_conf *key = info->control.hw_key;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			sec_type = 0x01;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			sec_type = 0x03;
			break;
		default:
			break;
		}
	}

	pkt_info->sec_type = sec_type;
}

static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false);
	pkt_info->dis_qselseq = true;
	pkt_info->en_hwseq = true;
	pkt_info->hw_ssn_sel = 0;
	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_sta_info *si;
	u8 fix_rate;
	u16 seq;
	u8 ampdu_factor = 0;
	u8 ampdu_density = 0;
	bool ampdu_en = false;
	u8 rate = DESC_RATE6M;
	u8 rate_id = 6;
	u8 bw = RTW_CHANNEL_WIDTH_20;
	bool stbc = false;
	bool ldpc = false;

	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;

	/* for broadcast/multicast, use default values */
	if (!sta)
		goto out;

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		ampdu_en = true;
		ampdu_factor = get_tx_ampdu_factor(sta);
		ampdu_density = get_tx_ampdu_density(sta);
	}

	if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold)
		pkt_info->rts = true;

	if (sta->deflink.vht_cap.vht_supported)
		rate = get_highest_vht_tx_rate(rtwdev, sta);
	else if (sta->deflink.ht_cap.ht_supported)
		rate = get_highest_ht_tx_rate(rtwdev, sta);
	else if (sta->deflink.supp_rates[0] <= 0xf)
		rate = DESC_RATE11M;
	else
		rate = DESC_RATE54M;

	si = (struct rtw_sta_info *)sta->drv_priv;

	bw = si->bw_mode;
	rate_id = si->rate_id;
	stbc = rtwdev->hal.txrx_1ss ? false : si->stbc_en;
	ldpc = si->ldpc_en;

out:
	pkt_info->seq = seq;
	pkt_info->ampdu_factor = ampdu_factor;
	pkt_info->ampdu_density = ampdu_density;
	pkt_info->ampdu_en = ampdu_en;
	pkt_info->rate = rate;
	pkt_info->rate_id = rate_id;
	pkt_info->bw = bw;
	pkt_info->stbc = stbc;
	pkt_info->ldpc = ldpc;

	fix_rate = dm_info->fix_rate;
	if (fix_rate < DESC_RATE_MAX) {
		pkt_info->rate = fix_rate;
		pkt_info->dis_rate_fallback = true;
		pkt_info->use_rate = true;
	}
}

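/* Build the TX packet info for an skb handed down by mac80211: frame-type
 * specific fields, TX report request, security type, descriptor offset,
 * queue selection and per-vif TX statistics.
 */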
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw_sta_info *si;
	struct ieee80211_vif *vif = NULL;
	__le16 fc = hdr->frame_control;
	bool bmc;

	if (sta) {
		si = (struct rtw_sta_info *)sta->drv_priv;
		vif = si->vif;
	}

	if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
		rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb);
	else if (ieee80211_is_data(fc))
		rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);

	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		rtw_tx_report_enable(rtwdev, pkt_info);

	pkt_info->bmc = bmc;
	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->qsel = skb->priority;
	pkt_info->ls = true;

	rtw_tx_stats(rtwdev, vif, skb);
}

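/* Build the TX packet info for a reserved page packet (beacon, PS-Poll,
 * QoS-Null, ...) that will be downloaded to the firmware's reserved pages.
 */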
void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
				      struct rtw_tx_pkt_info *pkt_info,
				      struct sk_buff *skb,
				      enum rtw_rsvd_packet_type type)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool bmc;

	/* A beacon or dummy reserved page packet indicates that it is the
	 * first reserved page, and its qsel is set by each HCI backend;
	 * every other reserved packet goes out on the management queue.
	 */
	if (type != RSVD_BEACON && type != RSVD_DUMMY)
		pkt_info->qsel = TX_DESC_QSEL_MGMT;

	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);
	pkt_info->bmc = bmc;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->ls = true;
	if (type == RSVD_PS_POLL) {
		pkt_info->nav_use_hdr = true;
	} else {
		pkt_info->dis_qselseq = true;
		pkt_info->en_hwseq = true;
		pkt_info->hw_ssn_sel = 0;
	}
	if (type == RSVD_QOS_NULL)
		pkt_info->bt_null = true;

	if (type == RSVD_BEACON) {
		struct rtw_rsvd_page *rsvd_pkt;
		int hdr_len;

		rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
						    struct rtw_rsvd_page,
						    build_list);
		if (rsvd_pkt && rsvd_pkt->tim_offset != 0) {
			hdr_len = sizeof(struct ieee80211_hdr_3addr);
			pkt_info->tim_offset = rsvd_pkt->tim_offset - hdr_len;
		}
	}

	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
}

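/* Wrap a raw buffer into an skb laid out like a reserved page download:
 * headroom for the TX descriptor is reserved and the packet info is built
 * as for a beacon (the first reserved page).
 */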
struct sk_buff *
rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
				struct rtw_tx_pkt_info *pkt_info,
				u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON);

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);

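/* Wrap a raw H2C payload into an skb with TX descriptor headroom; only the
 * packet size is filled in here, the caller is responsible for sending it.
 */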
struct sk_buff *
rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
			  struct rtw_tx_pkt_info *pkt_info,
			  u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	pkt_info->tx_pkt_size = size;

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);

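/* Direct transmit path for frames mac80211 pushes to the driver: build the
 * packet info, hand the skb to the HCI layer and kick off transmission.
 * On a write failure the skb is freed here.
 */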
void rtw_tx(struct rtw_dev *rtwdev,
	    struct ieee80211_tx_control *control,
	    struct sk_buff *skb)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, control->sta, skb);
	ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
	if (ret) {
		rtw_err(rtwdev, "failed to write TX skb to HCI\n");
		goto out;
	}

	rtw_hci_tx_kick_off(rtwdev);

	return;

out:
	ieee80211_free_txskb(rtwdev->hw, skb);
}

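/* Mark frames on an aggregation-enabled TXQ as A-MPDU, or flag the TID for
 * BA session setup and schedule the BA work the first time a suitable data
 * frame shows up (VO, EAPOL and BA-blocked queues are skipped).
 */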
static void rtw_txq_check_agg(struct rtw_dev *rtwdev,
			      struct rtw_txq *rtwtxq,
			      struct sk_buff *skb)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct ieee80211_tx_info *info;
	struct rtw_sta_info *si;

	if (test_bit(RTW_TXQ_AMPDU, &rtwtxq->flags)) {
		info = IEEE80211_SKB_CB(skb);
		info->flags |= IEEE80211_TX_CTL_AMPDU;
		return;
	}

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	if (test_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	if (!txq->sta)
		return;

	si = (struct rtw_sta_info *)txq->sta->drv_priv;
	set_bit(txq->tid, si->tid_ba);

	ieee80211_queue_work(rtwdev->hw, &rtwdev->ba_work);
}

static int rtw_txq_push_skb(struct rtw_dev *rtwdev,
			    struct rtw_txq *rtwtxq,
			    struct sk_buff *skb)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	rtw_txq_check_agg(rtwdev, rtwtxq, skb);

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, txq->sta, skb);
	ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
	if (ret) {
		rtw_err(rtwdev, "failed to write TX skb to HCI\n");
		return ret;
	}
	rtwtxq->last_push = jiffies;

	return 0;
}

static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev,
				       struct rtw_txq *rtwtxq)
{
	struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}

static void rtw_txq_push(struct rtw_dev *rtwdev,
			 struct rtw_txq *rtwtxq,
			 unsigned long frames)
{
	struct sk_buff *skb;
	int ret;
	int i;

	rcu_read_lock();

	for (i = 0; i < frames; i++) {
		skb = rtw_txq_dequeue(rtwdev, rtwtxq);
		if (!skb)
			break;

		ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb);
		if (ret) {
			rtw_err(rtwdev, "failed to push skb, ret %d\n", ret);
			break;
		}
	}

	rcu_read_unlock();
}

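/* TX work: drain every TXQ currently linked on rtwdev->txqs, then kick the
 * HCI layer once for the whole batch.
 */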
void rtw_tx_work(struct work_struct *w)
{
	struct rtw_dev *rtwdev = container_of(w, struct rtw_dev, tx_work);
	struct rtw_txq *rtwtxq, *tmp;

	spin_lock_bh(&rtwdev->txq_lock);

	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) {
		struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq);
		unsigned long frame_cnt;
		unsigned long byte_cnt;

		ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
		rtw_txq_push(rtwdev, rtwtxq, frame_cnt);

		list_del_init(&rtwtxq->list);
	}

	rtw_hci_tx_kick_off(rtwdev);

	spin_unlock_bh(&rtwdev->txq_lock);
}

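/* Per-TXQ driver state: init/cleanup of the list linkage used by
 * rtw_tx_work() above.
 */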
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	INIT_LIST_HEAD(&rtwtxq->list);
}

void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
{
	struct rtw_txq *rtwtxq;

	if (!txq)
		return;

	rtwtxq = (struct rtw_txq *)txq->drv_priv;
	spin_lock_bh(&rtwdev->txq_lock);
	if (!list_empty(&rtwtxq->list))
		list_del_init(&rtwtxq->list);
	spin_unlock_bh(&rtwdev->txq_lock);
}