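/*
 * rt2x00 library: data queue handling.  Covers RX skb allocation and DMA
 * mapping, TX descriptor creation, and management of the data queues
 * shared by the rt2x00 drivers.
 */
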
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

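	/*
	 * The frame size includes the descriptor and word info size,
	 * because the hardware receives those into the same skbuffer
	 * together with the frame data.
	 */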
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

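	/*
	 * The payload should be aligned to a 4-byte boundary, this
	 * means we need at least 3 bytes of headroom for moving the
	 * frame into the correct offset.
	 */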
	head_size = 4;

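	/*
	 * For IV/EIV/ICV assembly we must make sure there is at least
	 * 8 bytes of headroom for the IV/EIV and 8 bytes of tailroom
	 * for the ICV data.
	 */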
	if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
		head_size += 8;
		tail_size += 8;
	}

	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

int rt2x00queue_map_txskb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	skbdesc->skb_dma =
	    dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
		return -ENOMEM;

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct queue_entry *entry)
{
	struct device *dev = entry->queue->rt2x00dev->dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	} else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct queue_entry *entry)
{
	if (!entry->skb)
		return;

	rt2x00queue_unmap_skb(entry);
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

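/*
 * H/W needs L2 padding between the header and the payload if the header
 * is not 4 bytes aligned.
 */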
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	skb_push(skb, l2pad);
	memmove(skb->data, skb->data + l2pad, hdr_len);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, hdr_len);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
						 struct sk_buff *skb,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	u16 seqno;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		return;

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);

	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
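		/*
		 * Some devices (rt2800) have a H/W (or F/W) bug where the
		 * device incorrectly increases the seqno on retransmitted
		 * data (non-QOS) and management frames, so the seqno is
		 * generated in software instead.  Beacons are the
		 * exception and keep the H/W generated sequence number.
		 */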
		if (ieee80211_is_beacon(hdr->frame_control)) {
			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
			return;
		}

		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
	}

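	/*
	 * The hardware is not able to insert a sequence number itself:
	 * assign a software generated one here.  The counter is only
	 * bumped for the first fragment, so all fragments of a frame
	 * share the same sequence number.
	 */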
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}

static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
						  struct sk_buff *skb,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

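	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this frame is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */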
	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		txdesc->u.plcp.ifs = IFS_BACKOFF;
	else
		txdesc->u.plcp.ifs = IFS_SIFS;

	data_length = skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

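	/*
	 * PLCP setup: the length calculation depends on OFDM/CCK rate.
	 * The 4 bytes added to the data length above account for the FCS.
	 */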
	txdesc->u.plcp.signal = hwrate->plcp;
	txdesc->u.plcp.service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
		txdesc->u.plcp.length_low = data_length & 0x3f;
	} else {
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

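			/*
			 * Check if we need to set the Length Extension bit
			 * (only relevant for the 11Mbps CCK rate).
			 */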
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->u.plcp.service |= 0x80;
		}

		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
		txdesc->u.plcp.length_low = duration & 0xff;

		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.plcp.signal |= 0x08;
	}
}

static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
						struct sk_buff *skb,
						struct txentry_desc *txdesc,
						struct ieee80211_sta *sta,
						const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rt2x00_sta *sta_priv = NULL;
	u8 density = 0;

	if (sta) {
		sta_priv = sta_to_rt2x00_sta(sta);
		txdesc->u.ht.wcid = sta_priv->wcid;
		density = sta->deflink.ht_cap.ampdu_density;
	}

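	/*
	 * If IEEE80211_TX_RC_MCS is set, txrate->idx already contains
	 * the MCS rate to be used.
	 */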
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->u.ht.mcs = txrate->idx;

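		/*
		 * MIMO PS should be set to 1 for STA's using dynamic SM PS
		 * when using more than one tx stream (>MCS7).
		 */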
		if (sta && txdesc->u.ht.mcs > 7 &&
		    sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
	} else {
		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->u.ht.mcs |= 0x08;
	}

	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
			txdesc->u.ht.txop = TXOP_SIFS;
		else
			txdesc->u.ht.txop = TXOP_BACKOFF;

		return;
	}

	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
		txdesc->u.ht.stbc = 1;

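	/*
	 * This frame is eligible for an AMPDU, however, don't aggregate
	 * frames that are intended to probe a specific tx rate.
	 */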
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
		txdesc->u.ht.mpdu_density = density;
		txdesc->u.ht.ba_size = 7;
	}

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

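	/*
	 * Determine IFS values:
	 * - Use TXOP_BACKOFF for management frames except beacons
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 */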
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		txdesc->u.ht.txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->u.ht.txop = TXOP_SIFS;
	else
		txdesc->u.ht.txop = TXOP_HTTXOP;
}

static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
					     struct sk_buff *skb,
					     struct txentry_desc *txdesc,
					     struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate;
	const struct rt2x00_rate *hwrate = NULL;

	memset(txdesc, 0, sizeof(*txdesc));

	txdesc->length = skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

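	/*
	 * Check if this is an RTS or CTS frame.
	 */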
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

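	/*
	 * Determine retry information.
	 */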
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

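	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */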
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);

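	/*
	 * Determine rate modulation.
	 */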
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	else if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	else {
		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
		hwrate = rt2x00_get_rate(rate->hw_value);
		if (hwrate->flags & DEV_RATE_OFDM)
			txdesc->rate_mode = RATE_MODE_OFDM;
		else
			txdesc->rate_mode = RATE_MODE_CCK;
	}

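	/*
	 * Apply TX descriptor handling by components.
	 */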
	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
						    sta, hwrate);
	else
		rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
						      hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

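	/*
	 * This should not happen, we already checked the entry
	 * was ours.  When the hardware disagrees there has been
	 * a queue corruption!
	 */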
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

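	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */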
	skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);

	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
	    rt2x00queue_map_txskb(entry))
		return -ENOMEM;

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
}

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
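	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which in some way is related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */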
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}

static void rt2x00queue_bar_check(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_bar *bar = (void *) (entry->skb->data +
				    rt2x00dev->extra_tx_headroom);
	struct rt2x00_bar_list_entry *bar_entry;

	if (likely(!ieee80211_is_back_req(bar->frame_control)))
		return;

	bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);

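	/*
	 * If the alloc fails we still send the frame to the hardware,
	 * but we miss out on detecting when the BAR is acknowledged.
	 */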
	if (!bar_entry)
		return;

	bar_entry->entry = entry;
	bar_entry->block_acked = 0;

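	/*
	 * Copy the relevant parts of the 802.11 BAR into the BAR check
	 * list so the RX path can match BlockAcks against it using RCU
	 * with little overhead.
	 */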
	memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
	memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
	bar_entry->control = bar->control;
	bar_entry->start_seq_num = bar->start_seq_num;

	spin_lock_bh(&rt2x00dev->bar_list_lock);
	list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
	spin_unlock_bh(&rt2x00dev->bar_list_lock);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;
	int ret = 0;

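	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */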
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

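	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */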
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

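	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * Some drivers instead require L2 padding between the header
	 * and the payload; inserting the padding also takes care of
	 * the alignment.
	 */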
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);

	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
		rt2x00_err(queue->rt2x00dev,
			   "Arrived at non-free entry in the non-full queue %d\n"
			   "Please file bug report to %s\n",
			   queue->qid, DRV_PROJECT);
		ret = -EINVAL;
		goto out;
	}

	entry->skb = skb;

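	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed.  Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */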
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		ret = -EIO;
		goto out;
	}

	rt2x00queue_bar_check(entry);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(entry, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(queue, &txdesc);

out:
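	/*
	 * Pausing the queue has to be serialized with rt2x00lib_txdone(),
	 * hence do it under queue->tx_lock.
	 */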
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

	spin_unlock(&queue->tx_lock);
	return ret;
}

int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
			     struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

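	/*
	 * Clean up the beacon skb.
	 */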
	rt2x00queue_free_skb(intf->beacon);

	if (rt2x00dev->ops->lib->clear_beacon)
		rt2x00dev->ops->lib->clear_beacon(intf->beacon);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	rt2x00queue_free_skb(intf->beacon);

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0);
	if (!intf->beacon->skb)
		return -ENOMEM;

	rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);

	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));

	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	return 0;
}

bool rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void *data,
				bool (*fn)(struct queue_entry *entry,
					   void *data))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Entry requested from invalid index range (%d - %d)\n",
			   start, end);
		return true;
	}

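	/*
	 * Only protect the range we are going to loop over.  If during
	 * our loop an extra entry is set to pending, it should not be
	 * kicked during this run since it will be processed during the
	 * next run anyway.
	 */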
	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}

		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
			   index);
		return NULL;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	entry->last_action = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	default:
		break;
	}
}

void rt2x00queue_pause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
		return;

	rt2x00queue_pause_queue_nocheck(queue);
}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

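	/*
	 * If we are not supposed to drop any pending frames, we must
	 * force a start (=kick) of the queue to make sure the hardware
	 * will start transmitting them.
	 */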
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	ieee80211_stop_queues(rt2x00dev->hw);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_stop_queue(queue);

	rt2x00queue_stop_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);

void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->index_lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++)
		queue->index[i] = 0;

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

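	/*
	 * Allocate all queue entries in one chunk: the array of entries
	 * is followed by the per-entry driver private data.
	 */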
	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)))

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), queue->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		rt2x00queue_free_skb(&queue->entries[i]);
	}
}

static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
	if (status)
		goto exit;

	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	mutex_init(&queue->status_lock);
	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->index_lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;

	rt2x00dev->ops->queue_init(queue);

	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
		rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);

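	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */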
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}