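/*
 * Marvell Wireless LAN device driver: WMM
 */
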
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"

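/* Maximum driver packet delay value, in ms, reported to firmware */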
#define DRV_PKT_DELAY_TO_FW_MAX 512

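/* Lower and upper thresholds for the number of packets queued in the driver */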
#define WMM_QUEUED_PACKET_LOWER_LIMIT 180

#define WMM_QUEUED_PACKET_UPPER_LIMIT 200

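/* Bit shift of the IP precedence field within the TOS byte */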
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

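/* This table inverses the tos_to_tid operation to get a proper TID */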
const u8 tos_to_tid_inv[] = {
	0x02, /* from tos_to_tid[2] = 0 */
	0x00, /* from tos_to_tid[0] = 1 */
	0x01, /* from tos_to_tid[1] = 2 */
	0x03,
	0x04,
	0x05,
	0x06,
	0x07
};

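/* WMM information IE */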
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
	0x00, 0x50, 0xf2, 0x02,
	0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
	WMM_AC_BK,
	WMM_AC_VI,
	WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* UP -> TID mapping, per WMM AC */
	0x01,	/* UP 0 -> TID 1 (AC_BK) */
	0x02,	/* UP 1 -> TID 2 (AC_BK) */
	0x00,	/* UP 2 -> TID 0 (AC_BE) */
	0x03,	/* UP 3 -> TID 3 (AC_BE) */
	0x04,	/* UP 4 -> TID 4 (AC_VI) */
	0x05,	/* UP 5 -> TID 5 (AC_VI) */
	0x06,	/* UP 6 -> TID 6 (AC_VO) */
	0x07	/* UP 7 -> TID 7 (AC_VO) */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

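/*
 * This function debug prints the priority parameters for a WMM AC.
 */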
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

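/*
 * This function allocates and initializes an RA (receiver address)
 * list node for the given RA.
 */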
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

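/*
 * This function returns a pseudo-random number in the range
 * [BA_SETUP_PACKET_OFFSET, BA_SETUP_PACKET_OFFSET +
 *  BA_SETUP_MAX_PACKET_THRESHOLD - 1], used as the per-RA-list packet
 * count threshold after which BA setup is initiated.
 */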
static u8 mwifiex_get_random_ba_threshold(void)
{
	u64 ns;
	/* setup ba_packet_threshold here random number between
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
	 */
	ns = ktime_get_ns();
	ns += (ns >> 32) + (ns >> 16);

	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
}

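/*
 * This function allocates and adds an RA list node for each TID
 * with the given RA.
 */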
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		mwifiex_dbg(adapter, INFO,
			    "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_is_tdls_link_setup
			    (mwifiex_get_tdls_link_status(priv, ra))) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_bh(&priv->sta_list_spinlock);
			node = mwifiex_get_sta_entry(priv, ra);
			if (node)
				ra_list->tx_paused = node->tx_pause;
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_bh(&priv->sta_list_spinlock);
		}

		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
			    ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

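/*
 * This function sets the WMM queue priorities to their default values.
 */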
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

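/*
 * This function maps ACs to TIDs, rewriting the tos_to_tid array
 * according to the established queue priorities.
 */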
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}

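/*
 * This function initializes the WMM queue priorities from the WMM
 * Parameter IE, sorting the ACs by their average back-off.
 */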
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM Parameter IE: version=%d,\t"
		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
		    wmm_ie->version, wmm_ie->qos_info_bitmap &
		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		    wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		mwifiex_dbg(priv->adapter, INFO,
			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			    cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort to determine the relative priorities of the AC
	 * queues: sort by average back-off, breaking ties by queue
	 * priority.
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}

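/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * In case the AC is not enabled, the highest AC is returned that is
 * enabled and does not require admission control.
 */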
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, its enabled */
		return eval_ac;

	/* Setup a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC,
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			 * control
			 */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}

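/*
 * This function builds the AC downgrade table for all ACs.
 */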
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
		    "BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			mwifiex_dbg(priv->adapter, INFO,
				    "info: WMM: AC PRIO %d maps to %d\n",
				    ac_val,
				    priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

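/*
 * This function converts the IP TOS field to an WMM AC
 * Queue assignment.
 */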
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
		WMM_AC_BE,
		WMM_AC_BK,
		WMM_AC_BK,
		WMM_AC_BE,
		WMM_AC_VI,
		WMM_AC_VI,
		WMM_AC_VO,
		WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

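/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the AC corresponding to that TID is disabled (e.g. due to
 * call admission control). The mapping of TID to AC is handled
 * internally.
 */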
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index to tid array, picking from the array will be
	 * taken care by dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}

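/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */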
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

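/*
 * This function checks if the bypass Tx queues of all interfaces are empty.
 */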
int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
{
	struct mwifiex_private *priv;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (!skb_queue_empty(&priv->bypass_txq))
			return false;
	}

	return true;
}

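/*
 * This function checks if WMM Tx queues are empty or not.
 */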
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (!priv->port_open &&
		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

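/*
 * This function drops all packets queued in an RA list node,
 * completing each with failure status. The node itself is not freed.
 */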
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
		skb_unlink(skb, &ra_list->skb_head);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
	}
}

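/*
 * This function deletes all packets in all RA lists.
 *
 * The function iterates over the RA lists in a given list head and
 * deletes the queued packets in each of them.
 */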
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

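/*
 * This function deletes all packets in all RA lists of all TIDs and
 * resets the queued packet counters.
 */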
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
					       ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

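/*
 * This function deletes all RA list nodes for all TIDs.
 */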
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		mwifiex_dbg(priv->adapter, INFO,
			    "info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}

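/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */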
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
		skb_unlink(skb, &priv->tdls_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}

	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
		skb_unlink(skb, &priv->bypass_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}
	atomic_set(&priv->adapter->bypass_tx_pending, 0);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}

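/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */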
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
				    u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
		if (ra_list && ra_list->tx_paused != tx_pause) {
			pkt_cnt += ra_list->total_pkt_count;
			ra_list->tx_paused = tx_pause;
			if (tx_pause)
				priv->wmm.pkts_paused[i] +=
					ra_list->total_pkt_count;
			else
				priv->wmm.pkts_paused[i] -=
					ra_list->total_pkt_count;
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

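/* This function updates non-tdls peer ralist tx_pause while
 * tdls channel switching
 */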
void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
					       u8 *mac, u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
				    list) {
			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
				continue;

			if (ra_list->tx_paused != tx_pause) {
				pkt_cnt += ra_list->total_pkt_count;
				ra_list->tx_paused = tx_pause;
				if (tx_pause)
					priv->wmm.pkts_paused[i] +=
						ra_list->total_pkt_count;
				else
					priv->wmm.pkts_paused[i] -=
						ra_list->total_pkt_count;
			}
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

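/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */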
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

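/*
 * This function deletes RA list nodes for given mac for all TIDs.
 * Function also decrements TX pending count accordingly.
 */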
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		if (ra_list->tx_paused)
			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
		else
			atomic_sub(ra_list->total_pkt_count,
				   &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

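/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */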
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

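/*
 * This function adds a packet to bypass TX queue.
 * This is special TX queue for packets which can be sent even when port_open
 * is false.
 */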
void
mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
				   struct sk_buff *skb)
{
	skb_queue_tail(&priv->bypass_txq, skb);
}

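/*
 * This function adds a packet to WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */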
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			mwifiex_dbg(adapter, DATA,
				    "TDLS setup packet for %pM.\t"
				    "Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In case of infra as we have already created the list during
	 * association we just don't have to call get_queue_raptr, we will
	 * have only 1 raptr for a tid in case of infra
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
		case TDLS_CHAN_SWITCHING:
		case TDLS_IN_BASE_CHAN:
		case TDLS_IN_OFF_CHAN:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			ra_list = list_first_entry_or_null(&list_head,
					struct mwifiex_ra_list_tbl, list);
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
					priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	if (ra_list->tx_paused)
		priv->wmm.pkts_paused[tid_down]++;
	else
		atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

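/*
 * This function processes the get WMM status command response from
 * firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *
 * The TLVs are parsed and the queue priorities and AC downgrade
 * tables are updated accordingly.
 */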
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		    resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "QSTATUS TLV: %d, %d, %d\n",
				    tlv_wmm_qstatus->queue_index,
				    tlv_wmm_qstatus->flow_required,
				    tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and setup the IEEE IE type and length byte fields
			 */
			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr +
								     2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "WMM Parameter Set Count: %d\n",
				    wmm_param_ie->qos_info_bitmap & mask);

			if (wmm_param_ie->vend_hdr.len + 2 >
				sizeof(struct ieee_types_wmm_parameter))
				break;

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

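/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */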
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		    wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

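/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */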
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
		    "%d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}

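/*
 * This function retrieves the highest priority RA list table pointer.
 */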
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

try_again:
			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
			     !priv_tmp->port_open) ||
			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
				continue;

			if (adapter->if_ops.is_port_ready &&
			    !adapter->if_ops.is_port_ready(priv_tmp))
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!ptr->tx_paused &&
					    !skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
			}

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   HIGH_PRIO_TID);
				/* Iterate current private once more, since
				 * there still exist packets in data queue
				 */
				goto try_again;
			} else
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   NO_PKT_PRIO_TID);
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

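/* This function rotates ra and bss lists so packets are picked round robin.
 *
 * After a packet is successfully transmitted, rotate the ra list, so the ra
 * next to the one transmitted, will come first in the list. This way we pick
 * the ra in a round robin fashion. Same applies to bss nodes of equal
 * priority.
 *
 * Function also increments wmm.packets_out counter.
 */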
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];

	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

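/*
 * This function checks if 11n aggregation is possible.
 */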
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}

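/*
 * This function sends a single packet to firmware for transmission.
 */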
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	mwifiex_dbg(adapter, DATA,
		    "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back at the head */
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

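/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */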
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

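/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */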
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			      __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (adapter->data_sent || adapter->tx_lock_flag) {
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len =
		((skb_next) ? skb_next->len +
		 sizeof(struct txpd) : 0);
	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
						   skb, &tx_param);
	} else {
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		break;
	case -1:
		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	}
}

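/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */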
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index);
		/* ra_list_spinlock has been freed in
		 * mwifiex_send_processed_packet()
		 */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    ptr->ba_status ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    ptr->ba_status &&
		    ptr->amsdu_in_ampdu &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	}
	return 0;
}

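/*
 * This function dequeues one packet from each interface's bypass Tx
 * queue and hands it to the Tx path, requeueing it at the head of the
 * queue on -EBUSY.
 */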
void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
{
	struct mwifiex_tx_param tx_param;
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_private *priv;
	int i;

	if (adapter->data_sent || adapter->tx_lock_flag)
		return;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];

		if (!priv)
			continue;

		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;

		if (skb_queue_empty(&priv->bypass_txq))
			continue;

		skb = skb_dequeue(&priv->bypass_txq);
		tx_info = MWIFIEX_SKB_TXCB(skb);

		/* no aggregation for bypass packets */
		tx_param.next_pkt_len = 0;

		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
			skb_queue_head(&priv->bypass_txq, skb);
			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		} else {
			atomic_dec(&adapter->bypass_tx_pending);
		}
	}
}

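/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */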
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}