0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/dma-mapping.h>
0018 #include "ath9k.h"
0019 #include "ar9003_mac.h"
0020
0021 #define SKB_CB_ATHBUF(__skb) (*((struct ath_rxbuf **)__skb->cb))
0022
0023 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
0024 {
0025 return sc->ps_enabled &&
0026 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
0027 }
0028
0029
0030
0031
0032
0033
0034
0035
0036
/*
 * Attach an rx buffer to the hardware descriptor chain.
 *
 * Fills in the buffer's descriptor, appends it to the current chain via
 * *rxlink (or programs it as the head buffer if the chain is empty and
 * we are not flushing), and leaves rxlink pointing at this descriptor's
 * link word for the next call.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf,
			    bool flush)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
	ds->ds_link = 0;		/* link to null */
	ds->ds_data = bf->bf_buf_addr;	/* DMA address of the buffer */

	/* virtual address of the start of the buffer */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Set up the rx descriptor; rx_bufsize tells the hardware how
	 * much data it may DMA into this buffer.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink)
		*sc->rx.rxlink = bf->bf_daddr;	/* append to existing chain */
	else if (!flush)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr); /* first buffer: program hw head */

	sc->rx.rxlink = &ds->ds_link;
}
0070
0071 static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf,
0072 bool flush)
0073 {
0074 if (sc->rx.buf_hold)
0075 ath_rx_buf_link(sc, sc->rx.buf_hold, flush);
0076
0077 sc->rx.buf_hold = bf;
0078 }
0079
/*
 * Program @antenna as the default receive antenna and reset the
 * antenna-diversity switch counter.
 */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;	/* restart the "other antenna" vote count */
}
0087
/*
 * Program operating-mode dependent PCU state: rx filter, bssid mask,
 * opmode and multicast filter.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter (accept everything) */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
0109
/*
 * Move one buffer from the free list into the EDMA rx FIFO of @qtype.
 *
 * Returns false when the hardware FIFO is already at its depth limit,
 * true once the buffer has been handed to the hardware.
 *
 * NOTE(review): assumes the caller guarantees sc->rx.rxbuf is non-empty;
 * list_first_entry() on an empty list would be invalid.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	/*
	 * Clear the status area before the device gets the buffer, so a
	 * stale status is never mistaken for a completed descriptor.
	 */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	/* remember the owning ath_rxbuf in the skb control block */
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	__skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
0137
0138 static void ath_rx_addbuffer_edma(struct ath_softc *sc,
0139 enum ath9k_rx_qtype qtype)
0140 {
0141 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
0142 struct ath_rxbuf *bf, *tbf;
0143
0144 if (list_empty(&sc->rx.rxbuf)) {
0145 ath_dbg(common, QUEUE, "No free rx buf available\n");
0146 return;
0147 }
0148
0149 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
0150 if (!ath_rx_edma_buf_link(sc, qtype))
0151 break;
0152
0153 }
0154
/*
 * Drain every skb from the EDMA rx FIFO of @qtype and return the
 * attached ath_rxbuf entries to the free list.
 */
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rxbuf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);	/* stored by ath_rx_edma_buf_link() */
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}
0170
/*
 * Undo ath_rx_edma_init(): pull all buffers back from both FIFOs, then
 * unmap and free every rx skb. The ath_rxbuf array itself is
 * devm-allocated (see ath_rx_edma_init) and is released by the driver
 * core.
 */
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_rxbuf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
0191
0192 static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
0193 {
0194 __skb_queue_head_init(&rx_edma->rx_fifo);
0195 rx_edma->rx_fifo_hwsize = size;
0196 }
0197
/*
 * Set up EDMA reception: program the hardware buffer size, initialize
 * both rx FIFOs, and allocate + DMA-map @nbufs receive buffers onto the
 * free list.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * allocated buffers are released via ath_rx_edma_cleanup().
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0, i;
	u32 size;

	/* the payload area excludes the leading rx status block */
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	/* one ath_rxbuf per buffer; freed automatically via devm */
	size = sizeof(struct ath_rxbuf) * nbufs;
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		/* bidirectional: hw writes frames, sw clears the status area */
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}
0255
/*
 * Bring up EDMA reception: enable rx, pre-fill both FIFOs, program the
 * operating mode and start the PCU receive engine.
 */
static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
}
0264
/* Reclaim all rx buffers queued to the hardware on both EDMA FIFOs. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
0270
/*
 * Allocate and initialize the receive path for @nbufs buffers.
 *
 * EDMA-capable chips are delegated to ath_rx_edma_init(); legacy chips
 * get a descriptor ring via ath_descdma_setup() plus one DMA-mapped skb
 * per descriptor.
 *
 * Returns 0 on success or a negative errno; ath_rx_cleanup() is called
 * on any failure.
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		return ath_rx_edma_init(sc, nbufs);

	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);

	/* Initialize rx descriptors */
	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
	}
	sc->rx.rxlink = NULL;	/* no hardware chain linked yet */
err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
0330
/*
 * Release all rx buffers allocated by ath_rx_init(): unmap and free
 * each skb. Descriptor memory is managed elsewhere (descdma / devm).
 */
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}
}
0355
0356
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
0373
0374
/*
 * Compute the PCU rx filter word for the current operating state
 * (opmode, channel context flags, monitoring, radar, etc.).
 */
u32 ath_calcrxfilter(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 rfilt;

	/* TX99 mode: receive nothing */
	if (IS_ENABLED(CONFIG_ATH9K_TX99))
		return 0;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

	spin_lock_bh(&sc->chan_lock);

	if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	/* dynack needs control frames to sample ACK timing */
	if ((sc->cur_chan->rxfilter & FIF_CONTROL) ||
	    sc->sc_ah->dynack.enabled)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	/*
	 * A single-interface station that is not in promiscuous beacon
	 * mode only needs beacons from its own BSS; everyone else (except
	 * OCB) gets all beacons.
	 */
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->cur_chan->nvifs <= 1) &&
	    !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->cur_chan->rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	/* HT channels: accept compressed BlockAckReq frames */
	if (sc->cur_chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->cur_chan->nvifs > 1 ||
	    (sc->cur_chan->rxfilter & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah) ||
	    AR_SREV_9561(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_CONTROL_WRAPPER;

	/* while scanning with channel contexts, accept all beacons */
	if (ath9k_is_chanctx_enabled() &&
	    test_bit(ATH_OP_SCANNING, &common->op_flags))
		rfilt |= ATH9K_RX_FILTER_BEACON;

	spin_unlock_bh(&sc->chan_lock);

	return rfilt;

}
0440
/*
 * (Re)start reception: rebuild the descriptor chain from the free list,
 * point the hardware at its head, and start the PCU receive engine.
 * EDMA chips take the ath_edma_start_recv() path instead.
 */
void ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rxbuf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return;
	}

	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.buf_hold = NULL;
	sc->rx.rxlink = NULL;	/* start a fresh chain */
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf, false);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
}
0472
/*
 * Run the rx tasklet in flush mode (flush=1) so pending completed
 * frames are processed without re-arming the hardware; EDMA chips must
 * drain both the HP and LP queues.
 */
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}
0479
/*
 * Stop reception: abort the PCU, clear the rx filter, stop rx DMA and
 * flush whatever completed in the meantime.
 *
 * Returns true when rx DMA stopped cleanly and no chip reset is
 * required, false otherwise.
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);	/* disable all reception */
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	ath_flushrecv(sc);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	/* an unplugged device can't stop DMA; don't warn in that case */
	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
			"Failed to stop Rx DMA\n");
		RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
	}
	return stopped && !reset;
}
0504
/*
 * Walk the information elements of a beacon frame looking for the TIM
 * element. Returns true when this is a DTIM beacon (dtim_count == 0)
 * whose multicast bit (bitmap_ctrl bit 0) indicates buffered
 * broadcast/multicast traffic will follow.
 */
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	/* each IE is: id byte, length byte, then `elen` payload bytes */
	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)	/* truncated IE */
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)	/* not a DTIM beacon */
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
0536
/*
 * Powersave handling for a received beacon from our AP: resync beacon
 * timers if requested, and decide whether to stay awake for buffered
 * broadcast/multicast (CAB) traffic announced in the TIM.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	bool skip_beacon = false;

	/* too short to carry a header + fixed beacon fields + one IE */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure beacon timers based on synchronized timestamp\n");

#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
		/* don't reprogram beacon timers while offchannel */
		if (ath9k_is_chanctx_enabled()) {
			if (sc->cur_chan == &sc->offchannel.chan)
				skip_beacon = true;
		}
#endif

		if (!skip_beacon &&
		    !(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
			ath9k_set_beacon(sc);

		ath9k_p2p_beacon_sync(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * DTIM beacon announced buffered broadcast/multicast
		 * frames; stay awake until they have been received.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * A new beacon arrived while still waiting for CAB frames
		 * from the previous one - treat the wait as timed out.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}
0590
/*
 * Powersave bookkeeping per received frame: clears the various
 * PS_WAIT_FOR_* flags once the frame(s) we stayed awake for have
 * arrived (own beacon, last CAB frame, or PS-Poll response).
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at
		 * this point (More Data cleared on a multicast frame).
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
0626
/*
 * Try to pop one completed frame from the EDMA rx FIFO of @qtype.
 *
 * Returns false when the FIFO is empty or the head descriptor is still
 * owned by hardware (-EINPROGRESS). Returns true otherwise, with *dest
 * set to the completed buffer, or to NULL when the descriptor was
 * corrupt (-EINVAL) and the buffer was recycled.
 */
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_rxbuf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_rxbuf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* not done yet - hand the buffer back to the device */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor - recycle this buffer */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		/* the following buffer may belong to the same (bad)
		 * frame - recycle it as well */
		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}
0679
0680 static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
0681 struct ath_rx_status *rs,
0682 enum ath9k_rx_qtype qtype)
0683 {
0684 struct ath_rxbuf *bf = NULL;
0685
0686 while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
0687 if (!bf)
0688 continue;
0689
0690 return bf;
0691 }
0692 return NULL;
0693 }
0694
/*
 * Return the next completed rx buffer from the legacy descriptor chain,
 * or NULL when there is nothing ready. The returned buffer is removed
 * from rx.rxbuf and its payload is synced for CPU access.
 */
static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
					     struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_rxbuf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
	if (bf == sc->rx.buf_hold)	/* still held back for the hardware */
		return NULL;

	ds = bf->bf_desc;

	/*
	 * The descriptor may still be owned by hardware. In that case,
	 * peek at the NEXT descriptor: if that one is done, the hardware
	 * must have moved past this one, so its -EINPROGRESS is only a
	 * transient race with the status write-back.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_rxbuf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;	/* hardware really is still on bf */

		/*
		 * Re-check the original descriptor; if it still reports
		 * in-progress although its successor is done, treat it
		 * as an empty "more" fragment so it gets discarded
		 * cleanly by the caller.
		 */
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Sync the DMA buffer before the CPU reads the frame contents.
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
0786
/*
 * Extend the 32-bit hardware rx timestamp to a full 64-bit mactime
 * using the 64-bit TSF, compensating for a 32-bit TSF rollover that may
 * have occurred between frame reception and the TSF read (detected by a
 * gap larger than 0x10000000 ticks in either direction).
 */
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;	/* frame before rollover */

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;	/* frame after rollover */
}
0802
0803
0804
0805
0806
0807
/*
 * Validate a received frame and fill in its mac80211 rx status.
 *
 * Returns 0 when the frame should be delivered (or is an accepted
 * intermediate fragment, rs_more set), -EINVAL when it must be dropped.
 * On corruption the discard_next flag is propagated so the remaining
 * fragments of the same frame are dropped too.
 */
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
				   struct sk_buff *skb,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error, u64 tsf)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr;
	bool discard_current = sc->rx.discard_next;
	bool is_phyerr;

	/*
	 * Discard a continuation of a frame whose earlier part was
	 * already found to be corrupt.
	 */
	if (discard_current)
		goto corrupt;

	sc->rx.discard_next = false;

	/*
	 * Discard zero-length and implausibly short frames; PHY error
	 * "frames" are exempt from the minimum-length check since they
	 * only carry error information.
	 */
	is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
	if (!rx_stats->rs_datalen ||
	    (rx_stats->rs_datalen < 10 && !is_phyerr)) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}

	/*
	 * rs_datalen cannot exceed the usable buffer size; a larger
	 * value means the descriptor status is bogus.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(sc, rx_len_err);
		goto corrupt;
	}

	/* Only the last fragment of a chained frame carries full status */
	if (rx_stats->rs_more)
		return 0;

	/*
	 * A corrupt descriptor means the whole frame (including any
	 * earlier fragments) is unusable.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;

	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

	ath9k_process_tsf(rx_stats, rx_status, tsf);
	ath_debug_stat_rx(sc, rx_stats);

	/*
	 * PHY errors are consumed here (DFS radar detection, spectral
	 * scan FFT samples) and never delivered to mac80211.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		if (hw->conf.radar_enabled) {
			ath9k_dfs_process_phyerr(sc, hdr, rx_stats,
						 rx_status->mactime);
		} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
			   ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
					       rx_status->mactime)) {
			RX_STAT_INC(sc, rx_spectral);
		}
		return -EINVAL;
	}

	/*
	 * Apply the common accept/reject rules (CRC, decrypt, filter);
	 * chan_lock protects cur_chan->rxfilter.
	 */
	spin_lock_bh(&sc->chan_lock);
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error,
				 sc->cur_chan->rxfilter)) {
		spin_unlock_bh(&sc->chan_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&sc->chan_lock);

	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(sc, rx_beacons);
		rx_stats->is_mybeacon = true;
	}

	/* channel info is required to fill in band/freq below */
	if (WARN_ON(!ah->curchan))
		return -EINVAL;

	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/* unknown hardware rate index - drop the frame */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(sc, rx_rate_err);
		return -EINVAL;
	}

	if (ath9k_is_chanctx_enabled()) {
		if (rx_stats->is_mybeacon)
			ath_chanctx_beacon_recv_ev(sc,
					   ATH_CHANCTX_EVENT_BEACON_RECEIVED);
	}

	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);

	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	/* btcoex traffic accounting: count real data frames only */
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

	return 0;

corrupt:
	/* drop the rest of this frame's fragments as well */
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
}
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
/*
 * Per-frame antenna diversity: switch the default antenna after three
 * consecutive frames arrive on the other one, and feed the combining
 * algorithm where applicable.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity chooses the
	 * other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		/* with BT antenna diversity, only scan when it is enabled */
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}
0991
0992 static void ath9k_apply_ampdu_details(struct ath_softc *sc,
0993 struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
0994 {
0995 if (rs->rs_isaggr) {
0996 rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
0997
0998 rxs->ampdu_reference = sc->rx.ampdu_ref;
0999
1000 if (!rs->rs_moreaggr) {
1001 rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
1002 sc->rx.ampdu_ref++;
1003 }
1004
1005 if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
1006 rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
1007 }
1008 }
1009
/*
 * Estimate the airtime consumed by a received data frame and report it
 * to mac80211's per-station airtime accounting.
 */
static void ath_rx_count_airtime(struct ath_softc *sc,
				 struct ath_rx_status *rs,
				 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_sta *sta;
	struct ieee80211_rx_status *rxs;
	const struct ieee80211_rate *rate;
	bool is_sgi, is_40, is_sp;
	int phy;
	u16 len = rs->rs_datalen;
	u32 airtime = 0;
	u8 tidno;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
	if (!sta)
		goto exit;
	tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

	rxs = IEEE80211_SKB_RXCB(skb);

	is_sgi = !!(rxs->enc_flags & RX_ENC_FLAG_SHORT_GI);
	is_40 = !!(rxs->bw == RATE_INFO_BW_40);
	is_sp = !!(rxs->enc_flags & RX_ENC_FLAG_SHORTPRE);

	if (!!(rxs->encoding == RX_ENC_HT)) {
		/* HT (MCS) frame: duration from the rate index */
		airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
					is_40, is_sgi, is_sp);
	} else {
		/* legacy CCK/OFDM frame: duration from the bitrate table */
		phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
		rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx];
		airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100,
						 len, rxs->rate_idx, is_sp);
	}

	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
exit:
	rcu_read_unlock();
}
1059
/*
 * Main receive processing loop (run from tasklet context).
 *
 * Pulls completed buffers from the hardware (EDMA FIFO or legacy
 * descriptor chain), validates them, reassembles chained fragments,
 * delivers frames to mac80211, and replenishes the hardware with fresh
 * buffers. With @flush set, frames are drained but the hardware is not
 * re-armed. @hp selects the high-priority EDMA queue.
 *
 * Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_rxbuf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u64 tsf = 0;
	unsigned long flags;
	dma_addr_t new_buf_addr;
	unsigned int budget = 512;	/* cap frames processed per invocation */
	struct ieee80211_hdr *hdr;

	/* EDMA buffers are mapped bidirectionally (status area is written
	 * by software); legacy buffers are device-to-host only */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

	tsf = ath9k_hw_gettsf64(ah);

	do {
		bool decrypt_error = false;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * For chained frames, status (and the 802.11 header) is
		 * taken from the first fragment, held in sc->rx.frag.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
						 &decrypt_error, tsf);
		if (retval)
			goto requeue_drop_frag;

		/*
		 * Allocate a replacement skb up front so we always have
		 * a buffer to hand back to the hardware.
		 */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/*
		 * Out of memory: drop this frame and recycle its old skb
		 * back to the hardware instead.
		 */
		if (!requeue_skb) {
			RX_STAT_INC(sc, rx_oom_err);
			goto requeue_drop_frag;
		}

		/* map the fresh buffer for the hardware */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

		/* unmap the received frame so the CPU may own it fully */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize, dma_type);

		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

		/* expose the payload, stripping the leading hw status area */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);

		if (rs.rs_more) {
			RX_STAT_INC(sc, rx_frags);
			/*
			 * rs_more marks a chained descriptor; stash this
			 * fragment and wait for the rest of the frame.
			 * Only a single continuation is supported.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(sc, rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			/* append this fragment to the stored first one */
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(sc, rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/* drop the 8-byte Michael MIC that was stripped from hw view */
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		ath9k_antenna_check(sc, &rs);
		ath9k_apply_ampdu_details(sc, &rs, rxs);
		ath_debug_rate_stats(sc, &rs, skb);
		ath_rx_count_airtime(sc, &rs, skb);

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_ack(hdr->frame_control))
			ath_dynack_sample_ack_ts(sc->sc_ah, skb, rs.rs_tstamp);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		/* abandon any partially assembled frame */
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		list_add_tail(&bf->list, &sc->rx.rxbuf);

		if (!edma) {
			ath_rx_buf_relink(sc, bf, flush);
			if (!flush)
				ath9k_hw_rxena(ah);
		} else if (!flush) {
			ath_rx_edma_buf_link(sc, qtype);
		}

		if (!budget--)
			break;
	} while (1);

	/* re-enable the RXEOL/RXORN interrupts disabled by the ISR */
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}