0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0033
0034 #include "ath5k.h"
0035 #include "reg.h"
0036 #include "debug.h"
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 *
 * Sets the RX enable bit on the control register; the read-back
 * flushes the posted write out to the hardware.
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);	/* flush posted write */
}
0053
0054
0055
0056
0057
0058 static int
0059 ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
0060 {
0061 unsigned int i;
0062
0063 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
0064
0065
0066
0067
0068 for (i = 1000; i > 0 &&
0069 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
0070 i--)
0071 udelay(100);
0072
0073 if (!i)
0074 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0075 "failed to stop RX DMA !\n");
0076
0077 return i ? 0 : -EBUSY;
0078 }
0079
0080
0081
0082
0083
/**
 * ath5k_hw_get_rxdp() - Get RX descriptor's address
 * @ah: The &struct ath5k_hw
 *
 * Returns the current contents of the RX descriptor pointer register.
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
0089
0090
0091
0092
0093
0094
0095
0096
0097 int
0098 ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
0099 {
0100 if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
0101 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0102 "tried to set RXDP while rx was active !\n");
0103 return -EIO;
0104 }
0105
0106 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
0107 return 0;
0108 }
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * On AR5210 (no QCU) the per-queue enable bits live in the control
 * register: queue 0 carries data, queue 1 carries beacon/CAB traffic,
 * and the beacon-control writes to BSR must happen before CR is
 * re-written. On QCU-capable MACs we simply set the queue's TXE bit,
 * unless a disable (TXD) request is still pending for it.
 *
 * Returns 0 on success, -EINVAL for an invalid/inactive queue and
 * -EIO if a disable is still pending on the queue.
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue's enable bit by type while masking out
		 * its disable bit; beacon/CAB queues also need the
		 * beacon-control register programmed first.
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
					AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);	/* flush posted write */
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187 static int
0188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
0189 {
0190 unsigned int i = 40;
0191 u32 tx_queue, pending;
0192
0193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0194
0195
0196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
0197 return -EINVAL;
0198
0199 if (ah->ah_version == AR5K_AR5210) {
0200 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
0201
0202
0203
0204
0205 switch (ah->ah_txq[queue].tqi_type) {
0206 case AR5K_TX_QUEUE_DATA:
0207 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
0208 break;
0209 case AR5K_TX_QUEUE_BEACON:
0210 case AR5K_TX_QUEUE_CAB:
0211
0212 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
0213 ath5k_hw_reg_write(ah, 0, AR5K_BSR);
0214 break;
0215 default:
0216 return -EINVAL;
0217 }
0218
0219
0220 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
0221 ath5k_hw_reg_read(ah, AR5K_CR);
0222 } else {
0223
0224
0225
0226
0227
0228 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
0229 AR5K_QCU_MISC_DCU_EARLY);
0230
0231
0232
0233
0234 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
0235
0236
0237 for (i = 1000; i > 0 &&
0238 (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
0239 i--)
0240 udelay(100);
0241
0242 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
0243 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0244 "queue %i didn't stop !\n", queue);
0245
0246
0247 i = 1000;
0248 do {
0249 pending = ath5k_hw_reg_read(ah,
0250 AR5K_QUEUE_STATUS(queue)) &
0251 AR5K_QCU_STS_FRMPENDCNT;
0252 udelay(100);
0253 } while (--i && pending);
0254
0255
0256
0257 if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
0258 pending) {
0259
0260 ath5k_hw_reg_write(ah,
0261 AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
0262 AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
0263 AR5K_QUIET_CTL2);
0264
0265
0266 ath5k_hw_reg_write(ah,
0267 AR5K_QUIET_CTL1_QT_EN |
0268 AR5K_REG_SM(ath5k_hw_reg_read(ah,
0269 AR5K_TSF_L32_5211) >> 10,
0270 AR5K_QUIET_CTL1_NEXT_QT_TSF),
0271 AR5K_QUIET_CTL1);
0272
0273
0274 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
0275 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
0276
0277
0278 udelay(400);
0279 AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
0280 AR5K_QUIET_CTL1_QT_EN);
0281
0282
0283 i = 100;
0284 do {
0285 pending = ath5k_hw_reg_read(ah,
0286 AR5K_QUEUE_STATUS(queue)) &
0287 AR5K_QCU_STS_FRMPENDCNT;
0288 udelay(100);
0289 } while (--i && pending);
0290
0291 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
0292 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
0293
0294 if (pending)
0295 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0296 "quiet mechanism didn't work q:%i !\n",
0297 queue);
0298 }
0299
0300
0301
0302
0303 AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
0304 AR5K_QCU_MISC_DCU_EARLY);
0305
0306
0307 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
0308 if (pending) {
0309 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0310 "tx dma didn't stop (q:%i, frm:%i) !\n",
0311 queue, pending);
0312 return -EBUSY;
0313 }
0314 }
0315
0316
0317 return 0;
0318 }
0319
0320
0321
0322
0323
0324
0325
0326
0327 int
0328 ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
0329 {
0330 int ret;
0331 ret = ath5k_hw_stop_tx_dma(ah, queue);
0332 if (ret) {
0333 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0334 "beacon queue didn't stop !\n");
0335 return -EIO;
0336 }
0337 return 0;
0338 }
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352 u32
0353 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
0354 {
0355 u16 tx_reg;
0356
0357 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0358
0359
0360
0361
0362
0363 if (ah->ah_version == AR5K_AR5210) {
0364 switch (ah->ah_txq[queue].tqi_type) {
0365 case AR5K_TX_QUEUE_DATA:
0366 tx_reg = AR5K_NOQCU_TXDP0;
0367 break;
0368 case AR5K_TX_QUEUE_BEACON:
0369 case AR5K_TX_QUEUE_CAB:
0370 tx_reg = AR5K_NOQCU_TXDP1;
0371 break;
0372 default:
0373 return 0xffffffff;
0374 }
0375 } else {
0376 tx_reg = AR5K_QUEUE_TXDP(queue);
0377 }
0378
0379 return ath5k_hw_reg_read(ah, tx_reg);
0380 }
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395 int
0396 ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
0397 {
0398 u16 tx_reg;
0399
0400 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0401
0402
0403
0404
0405
0406 if (ah->ah_version == AR5K_AR5210) {
0407 switch (ah->ah_txq[queue].tqi_type) {
0408 case AR5K_TX_QUEUE_DATA:
0409 tx_reg = AR5K_NOQCU_TXDP0;
0410 break;
0411 case AR5K_TX_QUEUE_BEACON:
0412 case AR5K_TX_QUEUE_CAB:
0413 tx_reg = AR5K_NOQCU_TXDP1;
0414 break;
0415 default:
0416 return -EINVAL;
0417 }
0418 } else {
0419
0420
0421
0422
0423
0424 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
0425 return -EIO;
0426
0427 tx_reg = AR5K_QUEUE_TXDP(queue);
0428 }
0429
0430
0431 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
0432
0433 return 0;
0434 }
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452 int
0453 ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
0454 {
0455 u32 trigger_level, imr;
0456 int ret = -EIO;
0457
0458
0459
0460
0461 imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
0462
0463 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
0464 AR5K_TXCFG_TXFULL);
0465
0466 if (!increase) {
0467 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
0468 goto done;
0469 } else
0470 trigger_level +=
0471 ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
0472
0473
0474
0475
0476 if (ah->ah_version == AR5K_AR5210)
0477 ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
0478 else
0479 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
0480 AR5K_TXCFG_TXFULL, trigger_level);
0481
0482 ret = 0;
0483
0484 done:
0485
0486
0487
0488 ath5k_hw_set_imr(ah, imr);
0489
0490 return ret;
0491 }
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505 bool
0506 ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
0507 {
0508 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
0509 }
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Out parameter; receives the translated interrupt
 *	sources, filtered through the current interrupt mask
 *
 * Reads the (read-to-clear on 5210 / explicitly-acked on 5211+) ISR
 * registers, translates the raw bits into the driver's &enum ath5k_int
 * representation and stores the result in @interrupt_mask.
 *
 * Returns 0 on success or -ENODEV if the ISR reads back as all-ones
 * (card gone).
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * 5210: a single ISR register, cleared by the read itself.
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		/* All-ones means the card is gone */
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/* Report only sources we are currently interested in */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Bus errors are collapsed into one FATAL flag */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
				| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		data = isr;
	} else {
		/*
		 * 5211+: a primary ISR (PISR) plus five secondary ISRs
		 * (SISR0-4) carrying per-queue and beacon detail.
		 */
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR and SISRs... */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		/* All-ones means the card is gone */
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * Acknowledge only PISR bits that are not mirrors of
		 * SISR bits, except the TX bits which we keep (the
		 * per-queue detail was already captured above).
		 */
		pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
					(pisr & AR5K_INT_TX_ALL);

		/* Write the read values back to ack the interrupts */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
		/* Flush posted writes */
		ath5k_hw_reg_read(ah, AR5K_PISR);

		/* Report only sources we are currently interested in */
		*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;

		/*
		 * Rebuild the per-queue TX bitmap from the SISR detail
		 * for whichever TX events fired.
		 */
		ah->ah_txq_isr_txok_all = 0;

		if (pisr & AR5K_ISR_TXOK)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXOK);

		if (pisr & AR5K_ISR_TXDESC)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
						AR5K_SISR0_QCU_TXDESC);

		if (pisr & AR5K_ISR_TXERR)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXERR);

		if (pisr & AR5K_ISR_TXEOL)
			ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
						AR5K_SISR1_QCU_TXEOL);

		/* TIM may arrive directly on PISR... */
		if (pisr & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* ...or via BCNMISC with the detail in SISR2 */
		if (pisr & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		/* Host interface unit errors are fatal */
		if (unlikely(pisr & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/* Beacon not ready */
		if (unlikely(pisr & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* A queue got CBR overrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
			*interrupt_mask |= AR5K_INT_QCBRORN;

		/* A queue got CBR underrun */
		if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
			*interrupt_mask |= AR5K_INT_QCBRURN;

		/* A queue got triggered */
		if (unlikely(pisr & (AR5K_ISR_QTRIG)))
			*interrupt_mask |= AR5K_INT_QTRIG;

		data = pisr;
	}

	/*
	 * An interrupt fired but nothing we care about was set —
	 * log it (rate-limited) for debugging.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}
0729
0730
0731
0732
0733
0734
0735
0736
0737
0738
/**
 * ath5k_hw_set_imr() - Set interrupt mask
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask
 *
 * Programs the interrupt mask registers from the driver-level
 * &enum ath5k_int mask. Interrupts are globally disabled while the
 * per-source registers are re-programmed, and only re-enabled at the
 * end when @new_mask has AR5K_INT_GLOBAL set.
 *
 * Returns the previous interrupt mask.
 */
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * while we touch the per-source mask registers.
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);	/* flush */
	}

	/* Start from the sources common to all MAC versions */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per-queue TX underrun bits in SIMR2 */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		/* Fatal = host interface unit errors */
		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* TIM is reported both on PIMR and SIMR2 */
		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		/* Beacon not ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		/* Commit primary and secondary masks */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* 5210: fatal errors map directly onto the single IMR */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		/* Commit the mask */
		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM isn't wanted, zero its timer so it never fires */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store the new mask before re-enabling interrupts */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if GLOBAL is requested */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);	/* flush */
	}

	return old_mask;
}
0824
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840 void
0841 ath5k_hw_dma_init(struct ath5k_hw *ah)
0842 {
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856 if (ah->ah_version != AR5K_AR5210) {
0857 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
0858 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
0859 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
0860 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
0861 }
0862
0863
0864 if (ah->ah_version != AR5K_AR5210)
0865 ath5k_hw_set_imr(ah, ah->ah_imr);
0866
0867 }
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880 int
0881 ath5k_hw_dma_stop(struct ath5k_hw *ah)
0882 {
0883 int i, qmax, err;
0884 err = 0;
0885
0886
0887 ath5k_hw_set_imr(ah, 0);
0888
0889
0890 err = ath5k_hw_stop_rx_dma(ah);
0891 if (err)
0892 return err;
0893
0894
0895
0896 if (ah->ah_version != AR5K_AR5210) {
0897 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
0898 qmax = AR5K_NUM_TX_QUEUES;
0899 } else {
0900
0901 ath5k_hw_reg_read(ah, AR5K_ISR);
0902 qmax = AR5K_NUM_TX_QUEUES_NOQCU;
0903 }
0904
0905 for (i = 0; i < qmax; i++) {
0906 err = ath5k_hw_stop_tx_dma(ah, i);
0907
0908 if (err && err != -EINVAL)
0909 return err;
0910 }
0911
0912 return 0;
0913 }