0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/compiler.h>
0013 #include <linux/errno.h>
0014 #include <linux/if_arp.h>
0015 #include <linux/in6.h>
0016 #include <linux/in.h>
0017 #include <linux/ip.h>
0018 #include <linux/kernel.h>
0019 #include <linux/module.h>
0020 #include <linux/netdevice.h>
0021 #include <linux/proc_fs.h>
0022 #include <linux/skbuff.h>
0023 #include <linux/slab.h>
0024 #include <linux/tcp.h>
0025 #include <linux/types.h>
0026 #include <linux/wireless.h>
0027 #include <linux/etherdevice.h>
0028 #include <linux/uaccess.h>
0029
0030 #include "libipw.h"
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
0113 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
0114
0115 static int libipw_copy_snap(u8 * data, __be16 h_proto)
0116 {
0117 struct libipw_snap_hdr *snap;
0118 u8 *oui;
0119
0120 snap = (struct libipw_snap_hdr *)data;
0121 snap->dsap = 0xaa;
0122 snap->ssap = 0xaa;
0123 snap->ctrl = 0x03;
0124
0125 if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
0126 oui = P802_1H_OUI;
0127 else
0128 oui = RFC1042_OUI;
0129 snap->oui[0] = oui[0];
0130 snap->oui[1] = oui[1];
0131 snap->oui[2] = oui[2];
0132
0133 memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));
0134
0135 return SNAP_SIZE + sizeof(u16);
0136 }
0137
0138 static int libipw_encrypt_fragment(struct libipw_device *ieee,
0139 struct sk_buff *frag, int hdr_len)
0140 {
0141 struct lib80211_crypt_data *crypt =
0142 ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
0143 int res;
0144
0145 if (crypt == NULL)
0146 return -1;
0147
0148
0149
0150 atomic_inc(&crypt->refcnt);
0151 res = 0;
0152 if (crypt->ops && crypt->ops->encrypt_mpdu)
0153 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
0154
0155 atomic_dec(&crypt->refcnt);
0156 if (res < 0) {
0157 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
0158 ieee->dev->name, frag->len);
0159 ieee->ieee_stats.tx_discards++;
0160 return -1;
0161 }
0162
0163 return 0;
0164 }
0165
0166 void libipw_txb_free(struct libipw_txb *txb)
0167 {
0168 int i;
0169 if (unlikely(!txb))
0170 return;
0171 for (i = 0; i < txb->nr_frags; i++)
0172 if (txb->fragments[i])
0173 dev_kfree_skb_any(txb->fragments[i]);
0174 kfree(txb);
0175 }
0176
0177 static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
0178 int headroom, gfp_t gfp_mask)
0179 {
0180 struct libipw_txb *txb;
0181 int i;
0182
0183 txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
0184 if (!txb)
0185 return NULL;
0186
0187 memset(txb, 0, sizeof(struct libipw_txb));
0188 txb->nr_frags = nr_frags;
0189 txb->frag_size = txb_size;
0190
0191 for (i = 0; i < nr_frags; i++) {
0192 txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
0193 gfp_mask);
0194 if (unlikely(!txb->fragments[i])) {
0195 i--;
0196 break;
0197 }
0198 skb_reserve(txb->fragments[i], headroom);
0199 }
0200 if (unlikely(i != nr_frags)) {
0201 while (i >= 0)
0202 dev_kfree_skb_any(txb->fragments[i--]);
0203 kfree(txb);
0204 return NULL;
0205 }
0206 return txb;
0207 }
0208
0209 static int libipw_classify(struct sk_buff *skb)
0210 {
0211 struct ethhdr *eth;
0212 struct iphdr *ip;
0213
0214 eth = (struct ethhdr *)skb->data;
0215 if (eth->h_proto != htons(ETH_P_IP))
0216 return 0;
0217
0218 ip = ip_hdr(skb);
0219 switch (ip->tos & 0xfc) {
0220 case 0x20:
0221 return 2;
0222 case 0x40:
0223 return 1;
0224 case 0x60:
0225 return 3;
0226 case 0x80:
0227 return 4;
0228 case 0xa0:
0229 return 5;
0230 case 0xc0:
0231 return 6;
0232 case 0xe0:
0233 return 7;
0234 default:
0235 return 0;
0236 }
0237 }
0238
0239
0240
/*
 * Transmit entry point: convert an 802.3 (ethernet-framed) skb into one or
 * more 802.11 data fragments held in a libipw_txb, optionally performing
 * host-side MSDU and/or MPDU encryption, then hand the txb to the driver's
 * hard_start_xmit callback.
 *
 * The incoming @skb is always consumed (freed) by this function, whether
 * the frame is transmitted, dropped, or the allocation fails.
 *
 * NOTE(review): several error paths jump to the "success" label, which
 * silently drops the frame and returns NETDEV_TX_OK — apparently deliberate
 * (dropping rather than requeueing), but confirm against driver expectations.
 */
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	/* Template 802.11 header; frame_ctl and addresses filled in below,
	 * then copied into every fragment. */
	struct libipw_hdr_3addrqos header = {
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	/* Let the driver apply per-priority backpressure before we commit. */
	if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);

	/* No driver transmit hook: drop the frame (txb stays NULL, so the
	 * "success" path just frees the skb). */
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}

	/* Too short to even carry a SNAP header + ethertype: drop. */
	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	/* EAPOL (802.1X) frames bypass encryption during key negotiation. */
	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	/* In 802.1X mode, drop plaintext frames other than EAPOL when the
	 * port is configured to reject unencrypted traffic. */
	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save ethernet addresses before the 802.3 header is stripped. */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	/* Address layout differs by mode: infrastructure sends ToDS with
	 * BSSID in addr1; ad-hoc puts the peer in addr1 and BSSID in addr3. */
	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;

		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {

		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	/* QoS data frames carry a 2-byte QoS control field with the TID
	 * derived from the IP TOS classification. */
	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Strip the 802.3 header; the payload starts at the ethertype's
	 * former position and will be re-framed with LLC/SNAP. */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Payload size once the SNAP header + ethertype are prepended. */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* Host-based MSDU encryption (e.g. TKIP MIC): build a new skb with
	 * header + SNAP + payload, run encrypt_msdu over it, then pull the
	 * 802.11 header back off so fragmentation below sees payload only. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		skb_put_data(skb_new, &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}

	if (host_encrypt || ieee->host_open_frag) {
		/* Multicast/broadcast frames are never fragmented; unicast
		 * uses the configured fragmentation threshold. */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Per-fragment payload budget: threshold minus the 802.11
		 * header, minus FCS if we account for it, minus any
		 * per-MPDU crypto overhead (IV/ICV etc.). */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		if (host_encrypt && crypt && crypt->ops)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments and the (possibly shorter) last one. */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		/* Firmware handles fragmentation: one frame, full payload. */
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	/* An RTS frame, when required, occupies fragment slot 0. */
	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	/* NOTE(review): with host encryption the payload size is computed
	 * from frag_size * (nr_frags - 1) + last; when rts_required this
	 * counts the RTS slot as a full fragment — verify intended. */
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr = skb_put(skb_frag, hdr_len);

		/* Temporarily switch the template to an RTS control frame
		 * for fragment 0, then restore it for the data fragments. */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		/* Leave room for the per-MPDU crypto prefix (e.g. IV). */
		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);

		/* All but the last fragment set the MOREFRAGS bit. */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {

			bytes = bytes_last_frag;
		}

		/* The SNAP header goes only in the first data fragment,
		 * unless MSDU encryption already added it above. */
		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put
					 (skb_frag, SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

		/* Advance past the payload we just consumed. */
		skb_pull(skb, bytes);

		/* NOTE(review): the return value of encrypt_fragment is
		 * ignored here; a failed MPDU encryption still transmits. */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

      success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	/* The original skb is always consumed, transmitted or not. */
	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		/* Driver refused the frame: free it here (not requeued). */
		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

      failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);
0518
0519 EXPORT_SYMBOL(libipw_txb_free);