#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"
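
/*
 * 802.2 LLC/SNAP encapsulation: EtherTypes 0x8137 (Novell IPX) and 0x80f3
 * (AppleTalk AARP) use the 802.1H bridge-tunnel OUI; everything else gets
 * the RFC 1042 OUI.
 */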
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
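
/*
 * Prepend an 802.2 LLC/SNAP header plus the EtherType to @data; returns the
 * number of bytes written (SNAP_SIZE + sizeof(u16)).
 */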
static int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}

/*
 * Encrypt a single fragment in place with the current TX key: MSDU-level
 * encryption first (e.g. the TKIP Michael MIC), then MPDU-level encryption.
 * The crypt refcount is held across the callbacks so the ops cannot be
 * freed underneath us.
 */
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		netdev_info(ieee->dev, "%s: crypt is null\n", __func__);
		return -1;
	}

	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
			    __func__, frag->len);
		return -1;
	}

	return 0;
}
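
/* Free the txb container itself; the fragment skbs are not freed here, as
 * ownership of them passes to whoever consumed the txb.
 */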
void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}
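
/*
 * Allocate a txb holding nr_frags fragment skbs of txb_size bytes each,
 * unwinding every prior allocation on failure.
 */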
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = cpu_to_le16(txb_size);

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i]))
			goto err_free;
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}

	return txb;

err_free:
	while (--i >= 0)
		dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);

	return NULL;
}
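
/*
 * Map the IP precedence bits of an outgoing IPv4 frame to an 802.1d user
 * priority (0-7); non-IP frames fall back to best effort (0).
 */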
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE,
			     skb->data, skb->len);
#endif
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
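
/*
 * Decide whether this frame may go out inside an A-MPDU aggregate: HT must
 * be active and the frame a unicast QoS data frame with an admitted Block
 * Ack session on its TID (the ADDBA handshake is kicked off here if none
 * exists yet). Forced aggregation settings override the negotiated state.
 */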
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
			return;
		}
		if (!pTxTs->TxAdmittedBARecord.b_valid) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			    KEY_TYPE_NA)) {
				; /* don't start ADDBA before the key is set */
			} else if (tcb_desc->bdhcp == 1) {
				; /* nor for DHCP frames */
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->bUsingBa) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.ba_start_seq_ctrl.field.seq_num,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
}
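
/* Short preamble is never used at 1 Mb/s (data_rate == 2); otherwise follow
 * the BSS capability bit.
 */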
static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
}
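
/* Use the short guard interval when HT is active and the current bandwidth
 * supports it, or when short GI is forced on.
 */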
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
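
/* Transmit at 40 MHz only for unicast HT rates (MCS flag 0x80 set) when
 * 40 MHz TX is enabled and not forced down to 20 MHz.
 */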
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}
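
/*
 * Choose RTS/CTS or CTS-to-self protection. Legacy (pre-N) modes only use
 * the RTS length threshold and the BSS protection flag; N mode additionally
 * honours IOT workarounds and the HT operation mode before deciding that no
 * protection is needed.
 */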
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	struct rt_hi_throughput *pHTInfo;

	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16)) /* addr3 */
		return;

	if (ieee->mode < IEEE_N_24G) {
		/* Legacy modes: RTS above the threshold, or RTS/CTS when
		 * the BSS asks for protection
		 */
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	}

	pHTInfo = ieee->pHTInfo;

	/* One-shot loop: each "break" selects a protection mode */
	while (true) {
		if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			break;
		}
		if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
			u8 HTOpMode = pHTInfo->CurrentOpMode;

			if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
			     HTOpMode == 3)) ||
			    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
		}
		if (skb->len > ieee->rts) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
			break;
		}
		if (tcb_desc->bAMPDUEnable) {
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = false;
			break;
		}
		goto NO_PROTECTION;
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
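
/* Propagate the device-wide rate-fallback and driver-assigned-rate policy
 * into this frame's descriptor.
 */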
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
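
/* Return and post-increment the per-TID TX sequence number for unicast QoS
 * data frames; non-QoS and multicast frames use the global counters instead.
 */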
static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			       u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
		return seqnum;
	}
	return 0;
}
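
/* Downgrade a frame's user priority to the next lower access category
 * (VO -> VI -> BE -> BK); returns -1 once there is nothing lower.
 */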
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}
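
/* Current TX data rate: the legacy rate while in a legacy mode, otherwise
 * the HT operational rate.
 */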
static u8 rtllib_current_rate(struct rtllib_device *ieee)
{
	if (ieee->mode & IEEE_MODE_MASK)
		return ieee->rate;

	if (ieee->HTCurrentOperaRate)
		return ieee->HTCurrentOperaRate;
	else
		return ieee->rate & 0x7F;
}
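
/*
 * Core transmit path: validates the frame, detects DHCP/ARP traffic so
 * power save can be delayed around it, builds the 802.11 header, splits the
 * payload into encrypted fragments and hands the finished txb either to the
 * softmac TX queue or directly to the driver.
 */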
static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	int qos_activated = ieee->current_network.qos_data.active;
	u8 dest[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it...
	 */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	    (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		netdev_warn(ieee->dev, "No xmit handler.\n");
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		/* Save source and destination addresses */
		ether_addr_copy(dest, skb->data);
		ether_addr_copy(src, skb->data + ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				netdev_warn(ieee->dev,
					    "Could not allocate TXB\n");
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = cpu_to_le16(skb->len);
			skb_put_data(txb->fragments[0], skb->data, skb->len);

			goto success;
		}

		if (skb->len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
			if (ether_type == ETH_P_IP) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);
				if (ip->protocol == IPPROTO_UDP) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* DHCP: src/dst port 68 <-> 67 */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "TX ARP packet, delaying leisure power save\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		/* EAPOL frames go out unencrypted while 802.1X is running */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			netdev_dbg(ieee->dev,
				   "TX: IEEE 802.11 EAPOL frame: %s\n",
				   eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_activated)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA
			 */
			ether_addr_copy(header.addr1,
					ieee->current_network.bssid);
			ether_addr_copy(header.addr2, src);
			if (IsAmsdu)
				ether_addr_copy(header.addr3,
						ieee->current_network.bssid);
			else
				ether_addr_copy(header.addr3, dest);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID
			 */
			ether_addr_copy(header.addr1, dest);
			ether_addr_copy(header.addr2, src);
			ether_addr_copy(header.addr3,
					ieee->current_network.bssid);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination
		 * (multicast and broadcast are not fragmented)
		 */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_activated) {
			hdr_len = RTLLIB_3ADDR_LEN + 2; /* QoS control field */

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
					    skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					    skb->priority);
			}

			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);

		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}

		/* Determine amount of payload per fragment. Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space.
		 */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix
		 */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment
		 */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.)
		 */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = cpu_to_le16(bytes);

		if (qos_activated)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_activated) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

			/* If this is not the last fragment, ensure the
			 * MOREFRAGS bit is set
			 */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remainder */
				bytes = bytes_last_frag;
			}
			if ((qos_activated) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
									 header.addr1));
				frag_hdr->seq_ctl =
					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl) << 4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0] << 4 | i);
			}

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			skb_put_data(skb_frag, skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload
			 */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_activated) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		/* Raw TX: pass the 802.11 frame through untouched */
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			netdev_warn(ieee->dev, "skb too small (%d).\n",
				    skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			netdev_warn(ieee->dev, "Could not allocate TXB\n");
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = cpu_to_le16(skb->len);
		skb_put_data(txb->fragments[0], skb->data, skb->len);
	}

 success:
	if (txb) {
		tcb_desc = (struct cb_desc *)
			   (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			/* Send EAPOL at a fixed low rate with rate fallback
			 * disabled
			 */
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = rtllib_current_rate(ieee);

			if (bdhcp) {
				/* Pin DHCP frames to a low, reliable rate */
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_query_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += le16_to_cpu(txb->payload_size);
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}
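
/* net_device xmit entry point: clear the skb control buffer, then run the
 * full TX path above.
 */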
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);