0001 /*
0002  * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
0003  * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
0004  *
0005  * Permission to use, copy, modify, and distribute this software for any
0006  * purpose with or without fee is hereby granted, provided that the above
0007  * copyright notice and this permission notice appear in all copies.
0008  *
0009  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
0010  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
0011  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
0012  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
0013  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
0014  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
0015  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
0016  *
0017  */
0018 
0019 /*************************************\
0020 * DMA and interrupt masking functions *
0021 \*************************************/
0022 
0023 /**
0024  * DOC: DMA and interrupt masking functions
0025  *
0026  * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA
0027  * engine and handle queue setup for the 5210 chipset (the rest is handled
0028  * in qcu.c). We also set up the interrupt mask register (IMR) and read the
0029  * various interrupt status registers (ISR).
0030  */
0031 
0032 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0033 
0034 #include "ath5k.h"
0035 #include "reg.h"
0036 #include "debug.h"
0037 
0038 
0039 /*********\
0040 * Receive *
0041 \*********/
0042 
0043 /**
0044  * ath5k_hw_start_rx_dma() - Start DMA receive
0045  * @ah: The &struct ath5k_hw
0046  */
0047 void
0048 ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
0049 {
0050     ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
0051     ath5k_hw_reg_read(ah, AR5K_CR);
0052 }
0053 
0054 /**
0055  * ath5k_hw_stop_rx_dma() - Stop DMA receive
0056  * @ah: The &struct ath5k_hw
0057  */
0058 static int
0059 ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
0060 {
0061     unsigned int i;
0062 
0063     ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
0064 
0065     /*
0066      * It may take some time to disable the DMA receive unit
0067      */
0068     for (i = 1000; i > 0 &&
0069             (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
0070             i--)
0071         udelay(100);
0072 
0073     if (!i)
0074         ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0075                 "failed to stop RX DMA !\n");
0076 
0077     return i ? 0 : -EBUSY;
0078 }
0079 
0080 /**
0081  * ath5k_hw_get_rxdp() - Get RX Descriptor's address
0082  * @ah: The &struct ath5k_hw
0083  */
0084 u32
0085 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
0086 {
0087     return ath5k_hw_reg_read(ah, AR5K_RXDP);
0088 }
0089 
0090 /**
0091  * ath5k_hw_set_rxdp() - Set RX Descriptor's address
0092  * @ah: The &struct ath5k_hw
0093  * @phys_addr: RX descriptor address
0094  *
0095  * Returns -EIO if rx is active
0096  */
0097 int
0098 ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
0099 {
0100     if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
0101         ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0102                 "tried to set RXDP while rx was active !\n");
0103         return -EIO;
0104     }
0105 
0106     ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
0107     return 0;
0108 }
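
/*
 * Usage sketch (not part of the driver): the ordering ath5k_hw_set_rxdp()
 * expects. RXDP can only be (re)pointed while the RX DMA engine is stopped,
 * and the engine is started afterwards. "first_rxdesc_paddr" is a
 * hypothetical DMA address of the caller's first RX descriptor.
 */
static int example_rx_bringup(struct ath5k_hw *ah, u32 first_rxdesc_paddr)
{
    int ret;

    /* Point hw at the head of the RX descriptor chain;
     * fails with -EIO if RX DMA is still running */
    ret = ath5k_hw_set_rxdp(ah, first_rxdesc_paddr);
    if (ret)
        return ret;

    /* Only now kick the RX DMA engine */
    ath5k_hw_start_rx_dma(ah);
    return 0;
}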
0109 
0110 
0111 /**********\
0112 * Transmit *
0113 \**********/
0114 
0115 /**
0116  * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
0117  * @ah: The &struct ath5k_hw
0118  * @queue: The hw queue number
0119  *
0120  * Start DMA transmit for a specific queue and since 5210 doesn't have
0121  * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
0122  * queue for normal data and one queue for beacons). For queue setup
0123  * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
0124  * of range or if queue is already disabled.
0125  *
0126  * NOTE: Must be called after setting up tx control descriptor for that
0127  * queue (see below).
0128  */
0129 int
0130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
0131 {
0132     u32 tx_queue;
0133 
0134     AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0135 
0136     /* Return if queue is declared inactive */
0137     if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
0138         return -EINVAL;
0139 
0140     if (ah->ah_version == AR5K_AR5210) {
0141         tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
0142 
0143         /*
0144          * Set the queue by type on 5210
0145          */
0146         switch (ah->ah_txq[queue].tqi_type) {
0147         case AR5K_TX_QUEUE_DATA:
0148             tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
0149             break;
0150         case AR5K_TX_QUEUE_BEACON:
0151             tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
0152             ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
0153                     AR5K_BSR);
0154             break;
0155         case AR5K_TX_QUEUE_CAB:
0156             tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
0157             ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
0158                 AR5K_BCR_BDMAE, AR5K_BSR);
0159             break;
0160         default:
0161             return -EINVAL;
0162         }
0163         /* Start queue */
0164         ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
0165         ath5k_hw_reg_read(ah, AR5K_CR);
0166     } else {
0167         /* Return if queue is disabled */
0168         if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
0169             return -EIO;
0170 
0171         /* Start queue */
0172         AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
0173     }
0174 
0175     return 0;
0176 }
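
/*
 * Usage sketch (not part of the driver): the call order the NOTE above asks
 * for. The queue's descriptor chain must be set up and TXDP must point at it
 * before the queue is started. "first_txdesc_paddr" is a hypothetical DMA
 * address of the caller's first TX descriptor.
 */
static int example_tx_queue_kick(struct ath5k_hw *ah, unsigned int queue,
        u32 first_txdesc_paddr)
{
    int ret;

    /* Fails with -EIO on 5211+ if the queue is still active */
    ret = ath5k_hw_set_txdp(ah, queue, first_txdesc_paddr);
    if (ret)
        return ret;

    /* Enable TX DMA for this queue */
    return ath5k_hw_start_tx_dma(ah, queue);
}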
0177 
0178 /**
0179  * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
0180  * @ah: The &struct ath5k_hw
0181  * @queue: The hw queue number
0182  *
0183  * Stop DMA transmit on a specific hw queue and drain queue so we don't
0184  * have any pending frames. Returns -EBUSY if we still have pending frames,
0185  * -EINVAL if queue number is out of range or inactive.
0186  */
0187 static int
0188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
0189 {
0190     unsigned int i = 40;
0191     u32 tx_queue, pending;
0192 
0193     AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0194 
0195     /* Return if queue is declared inactive */
0196     if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
0197         return -EINVAL;
0198 
0199     if (ah->ah_version == AR5K_AR5210) {
0200         tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
0201 
0202         /*
0203          * Set by queue type
0204          */
0205         switch (ah->ah_txq[queue].tqi_type) {
0206         case AR5K_TX_QUEUE_DATA:
0207             tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
0208             break;
0209         case AR5K_TX_QUEUE_BEACON:
0210         case AR5K_TX_QUEUE_CAB:
0211             /* XXX Fix me: TXD1 & ~TXD1 is always 0 (TXD1 & ~TXE1 intended?) */
0212             tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
0213             ath5k_hw_reg_write(ah, 0, AR5K_BSR);
0214             break;
0215         default:
0216             return -EINVAL;
0217         }
0218 
0219         /* Stop queue */
0220         ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
0221         ath5k_hw_reg_read(ah, AR5K_CR);
0222     } else {
0223 
0224         /*
0225          * Enable DCU early termination to quickly
0226          * flush any pending frames from QCU
0227          */
0228         AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
0229                     AR5K_QCU_MISC_DCU_EARLY);
0230 
0231         /*
0232          * Schedule TX disable and wait until queue is empty
0233          */
0234         AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
0235 
0236         /* Wait for queue to stop */
0237         for (i = 1000; i > 0 &&
0238         (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
0239         i--)
0240             udelay(100);
0241 
0242         if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
0243             ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0244                 "queue %i didn't stop !\n", queue);
0245 
0246         /* Check for pending frames */
0247         i = 1000;
0248         do {
0249             pending = ath5k_hw_reg_read(ah,
0250                 AR5K_QUEUE_STATUS(queue)) &
0251                 AR5K_QCU_STS_FRMPENDCNT;
0252             udelay(100);
0253         } while (--i && pending);
0254 
0255         /* For 2413+ order PCU to drop packets using
0256          * QUIET mechanism */
0257         if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
0258             pending) {
0259             /* Set periodicity and duration */
0260             ath5k_hw_reg_write(ah,
0261                 AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
0262                 AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
0263                 AR5K_QUIET_CTL2);
0264 
0265             /* Enable quiet period for current TSF */
0266             ath5k_hw_reg_write(ah,
0267                 AR5K_QUIET_CTL1_QT_EN |
0268                 AR5K_REG_SM(ath5k_hw_reg_read(ah,
0269                         AR5K_TSF_L32_5211) >> 10,
0270                         AR5K_QUIET_CTL1_NEXT_QT_TSF),
0271                 AR5K_QUIET_CTL1);
0272 
0273             /* Force channel idle high */
0274             AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
0275                     AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
0276 
0277             /* Wait a while and disable mechanism */
0278             udelay(400);
0279             AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
0280                         AR5K_QUIET_CTL1_QT_EN);
0281 
0282             /* Re-check for pending frames */
0283             i = 100;
0284             do {
0285                 pending = ath5k_hw_reg_read(ah,
0286                     AR5K_QUEUE_STATUS(queue)) &
0287                     AR5K_QCU_STS_FRMPENDCNT;
0288                 udelay(100);
0289             } while (--i && pending);
0290 
0291             AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
0292                     AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
0293 
0294             if (pending)
0295                 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0296                     "quiet mechanism didn't work q:%i !\n",
0297                     queue);
0298         }
0299 
0300         /*
0301          * Disable DCU early termination
0302          */
0303         AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
0304                     AR5K_QCU_MISC_DCU_EARLY);
0305 
0306         /* Clear register */
0307         ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
0308         if (pending) {
0309             ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0310                     "tx dma didn't stop (q:%i, frm:%i) !\n",
0311                     queue, pending);
0312             return -EBUSY;
0313         }
0314     }
0315 
0316     /* TODO: Check for success on 5210 else return error */
0317     return 0;
0318 }
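
/*
 * Sketch (not part of the driver): the stop paths above all use the same
 * bounded busy-wait pattern (poll a register, udelay() between reads, give
 * up after a number of tries). Written out generically with hypothetical
 * names for clarity:
 */
static int example_poll_register_clear(struct ath5k_hw *ah, u16 reg, u32 mask,
        unsigned int tries, unsigned int delay_us)
{
    unsigned int i;

    for (i = tries; i > 0 &&
            (ath5k_hw_reg_read(ah, reg) & mask) != 0;
            i--)
        udelay(delay_us);

    /* Bits still set after all retries -> caller reports -EBUSY */
    return i ? 0 : -EBUSY;
}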
0319 
0320 /**
0321  * ath5k_hw_stop_beacon_queue() - Stop beacon queue
0322  * @ah: The &struct ath5k_hw
0323  * @queue: The queue number
0324  *
0325  * Returns -EIO if queue didn't stop
0326  */
0327 int
0328 ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
0329 {
0330     int ret;
0331     ret = ath5k_hw_stop_tx_dma(ah, queue);
0332     if (ret) {
0333         ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
0334                 "beacon queue didn't stop !\n");
0335         return -EIO;
0336     }
0337     return 0;
0338 }
0339 
0340 /**
0341  * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
0342  * @ah: The &struct ath5k_hw
0343  * @queue: The hw queue number
0344  *
0345  * Get TX descriptor's address for a specific queue. For 5210 we ignore
0346  * the queue number and use tx queue type since we only have 2 queues.
0347  * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
0348  * For newer chips with QCU/DCU we just read the corresponding TXDP register.
0349  *
0350  * XXX: Is TXDP read and clear ?
0351  */
0352 u32
0353 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
0354 {
0355     u16 tx_reg;
0356 
0357     AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0358 
0359     /*
0360      * Get the transmit queue descriptor pointer from the selected queue
0361      */
0362     /*5210 doesn't have QCU*/
0363     if (ah->ah_version == AR5K_AR5210) {
0364         switch (ah->ah_txq[queue].tqi_type) {
0365         case AR5K_TX_QUEUE_DATA:
0366             tx_reg = AR5K_NOQCU_TXDP0;
0367             break;
0368         case AR5K_TX_QUEUE_BEACON:
0369         case AR5K_TX_QUEUE_CAB:
0370             tx_reg = AR5K_NOQCU_TXDP1;
0371             break;
0372         default:
0373             return 0xffffffff;
0374         }
0375     } else {
0376         tx_reg = AR5K_QUEUE_TXDP(queue);
0377     }
0378 
0379     return ath5k_hw_reg_read(ah, tx_reg);
0380 }
0381 
0382 /**
0383  * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
0384  * @ah: The &struct ath5k_hw
0385  * @queue: The hw queue number
0386  * @phys_addr: The physical address
0387  *
0388  * Set TX descriptor's address for a specific queue. For 5210 we ignore
0389  * the queue number and use the tx queue type since we only have 2 queues,
0390  * so as above we use TXDP0 for the normal data queue and TXDP1 for the beacon queue.
0391  * For newer chips with QCU/DCU we just set the corresponding TXDP register.
0392  * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
0393  * active.
0394  */
0395 int
0396 ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
0397 {
0398     u16 tx_reg;
0399 
0400     AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
0401 
0402     /*
0403      * Set the transmit queue descriptor pointer register by type
0404      * on 5210
0405      */
0406     if (ah->ah_version == AR5K_AR5210) {
0407         switch (ah->ah_txq[queue].tqi_type) {
0408         case AR5K_TX_QUEUE_DATA:
0409             tx_reg = AR5K_NOQCU_TXDP0;
0410             break;
0411         case AR5K_TX_QUEUE_BEACON:
0412         case AR5K_TX_QUEUE_CAB:
0413             tx_reg = AR5K_NOQCU_TXDP1;
0414             break;
0415         default:
0416             return -EINVAL;
0417         }
0418     } else {
0419         /*
0420          * Set the transmit queue descriptor pointer for
0421          * the selected queue on QCU for 5211+
0422          * (this won't work if the queue is still active)
0423          */
0424         if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
0425             return -EIO;
0426 
0427         tx_reg = AR5K_QUEUE_TXDP(queue);
0428     }
0429 
0430     /* Set descriptor pointer */
0431     ath5k_hw_reg_write(ah, phys_addr, tx_reg);
0432 
0433     return 0;
0434 }
0435 
0436 /**
0437  * ath5k_hw_update_tx_triglevel() - Update tx trigger level
0438  * @ah: The &struct ath5k_hw
0439  * @increase: Flag to force increase of trigger level
0440  *
0441  * This function increases/decreases the tx trigger level for the tx fifo
0442  * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
0443  * the buffer and transmits its data. Lowering this results in sending small
0444  * frames more quickly but can lead to tx underruns; raising it too much can
0445  * cause other problems. Right now we start with the lowest possible value
0446  * (64Bytes) and if we get a tx underrun we increase it using the increase
0447  * flag. Returns -EIO if we have reached the maximum/minimum.
0448  *
0449  * XXX: Link this with tx DMA size ?
0450  * XXX2: Use it to save interrupts ?
0451  */
0452 int
0453 ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
0454 {
0455     u32 trigger_level, imr;
0456     int ret = -EIO;
0457 
0458     /*
0459      * Disable interrupts by setting the mask
0460      */
0461     imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
0462 
0463     trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
0464             AR5K_TXCFG_TXFULL);
0465 
0466     if (!increase) {
0467         if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
0468             goto done;
0469     } else
0470         trigger_level +=
0471             ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
0472 
0473     /*
0474      * Update trigger level on success
0475      */
0476     if (ah->ah_version == AR5K_AR5210)
0477         ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
0478     else
0479         AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
0480                 AR5K_TXCFG_TXFULL, trigger_level);
0481 
0482     ret = 0;
0483 
0484 done:
0485     /*
0486      * Restore interrupt mask
0487      */
0488     ath5k_hw_set_imr(ah, imr);
0489 
0490     return ret;
0491 }
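
/*
 * Usage sketch (not part of the driver): the policy described above, i.e.
 * bump the trigger level each time a TX underrun is reported. Calling this
 * from the driver's underrun handling (e.g. when get_isr reports an
 * underrun) is an assumption here, not something this file mandates.
 */
static void example_handle_tx_underrun(struct ath5k_hw *ah)
{
    /* Raise the FIFO threshold; -EIO means we are already at the maximum */
    if (ath5k_hw_update_tx_triglevel(ah, true))
        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                "tx trigger level already at maximum\n");
}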
0492 
0493 
0494 /*******************\
0495 * Interrupt masking *
0496 \*******************/
0497 
0498 /**
0499  * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
0500  * @ah: The &struct ath5k_hw
0501  *
0502  * Check if we have pending interrupts to process. Returns 1 if we
0503  * have pending interrupts and 0 if we don't.
0504  */
0505 bool
0506 ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
0507 {
0508     return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
0509 }
0510 
0511 /**
0512  * ath5k_hw_get_isr() - Get interrupt status
0513  * @ah: The &struct ath5k_hw
0514  * @interrupt_mask: Driver's interrupt mask used to filter out
0515  * interrupts in sw.
0516  *
0517  * This function is used inside our interrupt handler to determine the reason
0518  * for the interrupt by reading Primary Interrupt Status Register. Returns an
0519  * abstract interrupt status mask which is mostly ISR with some uncommon bits
0520  * being mapped to some standard non hw-specific positions
0521  * (check out &ath5k_int).
0522  *
0523  * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
0524  * function gets called are cleared on return.
0525  */
0526 int
0527 ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
0528 {
0529     u32 data = 0;
0530 
0531     /*
0532      * Read interrupt status from Primary Interrupt
0533      * Register.
0534      *
0535      * Note: PISR/SISR Not available on 5210
0536      */
0537     if (ah->ah_version == AR5K_AR5210) {
0538         u32 isr = 0;
0539         isr = ath5k_hw_reg_read(ah, AR5K_ISR);
0540         if (unlikely(isr == AR5K_INT_NOCARD)) {
0541             *interrupt_mask = isr;
0542             return -ENODEV;
0543         }
0544 
0545         /*
0546          * Filter out the non-common bits from the interrupt
0547          * status.
0548          */
0549         *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
0550 
0551         /* Handle INT_FATAL */
0552         if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
0553                         | AR5K_ISR_DPERR)))
0554             *interrupt_mask |= AR5K_INT_FATAL;
0555 
0556         /*
0557          * XXX: BMISS interrupts may occur after association.
0558          * I found this on 5210 code but it needs testing. If this is
0559          * true we should disable them before assoc and re-enable them
0560          * after a successful assoc + some jiffies.
0561             interrupt_mask &= ~AR5K_INT_BMISS;
0562          */
0563 
0564         data = isr;
0565     } else {
0566         u32 pisr = 0;
0567         u32 pisr_clear = 0;
0568         u32 sisr0 = 0;
0569         u32 sisr1 = 0;
0570         u32 sisr2 = 0;
0571         u32 sisr3 = 0;
0572         u32 sisr4 = 0;
0573 
0574         /* Read PISR and SISRs... */
0575         pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
0576         if (unlikely(pisr == AR5K_INT_NOCARD)) {
0577             *interrupt_mask = pisr;
0578             return -ENODEV;
0579         }
0580 
0581         sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
0582         sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
0583         sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
0584         sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
0585         sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
0586 
0587         /*
0588          * PISR holds the logical OR of interrupt bits
0589          * from SISR registers:
0590          *
0591          * TXOK and TXDESC  -> Logical OR of TXOK and TXDESC
0592          *          per-queue bits on SISR0
0593          *
0594          * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
0595          *          per-queue bits on SISR1
0596          *
0597          * TXURN -> Logical OR of TXURN per-queue bits on SISR2
0598          *
0599          * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
0600          *
0601          * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
0602          *      BCN_TIMEOUT, CAB_TIMEOUT and DTIM
0603          *      (and TSFOOR ?) bits on SISR2
0604          *
0605          * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
0606          *          QCBRURN per-queue bits on SISR3
0607          * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
0608          *
0609          * If we clear these bits on PISR we'll also clear all
0610          * related bits from SISRs, e.g. if we write the TXOK bit on
0611          * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
0612          * interrupt fired for another queue while we were reading
0613          * the interrupt registers and we write back the TXOK bit on
0614          * PISR we'll lose it. So make sure that we don't write back
0615          * on PISR any bits that come from SISRs. Clearing them from
0616          * SISRs will also clear PISR, so there is no need to worry here.
0617          */
0618 
0619         /* XXX: There seems to be an issue on some cards
0620          *  with tx interrupt flags not being updated
0621          *  on PISR even though all Tx interrupt bits
0622          *  are cleared on SISRs. Since we handle all
0623          *  Tx queues all together it shouldn't be an
0624          *  issue if we clear Tx interrupt flags also
0625          *  on PISR to avoid that.
0626          */
0627         pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
0628                     (pisr & AR5K_INT_TX_ALL);
0629 
0630         /*
0631          * Write to clear them...
0632          * Note: This means that each bit we write back
0633          * to the registers will get cleared, leaving the
0634          * rest unaffected. So this won't affect new interrupts
0635          * we didn't catch while reading/processing, we 'll get
0636          * them next time get_isr gets called.
0637          */
0638         ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
0639         ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
0640         ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
0641         ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
0642         ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
0643         ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
0644         /* Flush previous write */
0645         ath5k_hw_reg_read(ah, AR5K_PISR);
0646 
0647         /*
0648          * Filter out the non-common bits from the interrupt
0649          * status.
0650          */
0651         *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
0652 
0653         ah->ah_txq_isr_txok_all = 0;
0654 
0655         /* We treat TXOK, TXDESC, TXERR and TXEOL
0656          * the same way (schedule the tx tasklet)
0657          * so we track them all together per queue */
0658         if (pisr & AR5K_ISR_TXOK)
0659             ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
0660                         AR5K_SISR0_QCU_TXOK);
0661 
0662         if (pisr & AR5K_ISR_TXDESC)
0663             ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
0664                         AR5K_SISR0_QCU_TXDESC);
0665 
0666         if (pisr & AR5K_ISR_TXERR)
0667             ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
0668                         AR5K_SISR1_QCU_TXERR);
0669 
0670         if (pisr & AR5K_ISR_TXEOL)
0671             ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
0672                         AR5K_SISR1_QCU_TXEOL);
0673 
0674         /* Misc Beacon related interrupts */
0675 
0676         /* For AR5211 */
0677         if (pisr & AR5K_ISR_TIM)
0678             *interrupt_mask |= AR5K_INT_TIM;
0679 
0680         /* For AR5212+ */
0681         if (pisr & AR5K_ISR_BCNMISC) {
0682             if (sisr2 & AR5K_SISR2_TIM)
0683                 *interrupt_mask |= AR5K_INT_TIM;
0684             if (sisr2 & AR5K_SISR2_DTIM)
0685                 *interrupt_mask |= AR5K_INT_DTIM;
0686             if (sisr2 & AR5K_SISR2_DTIM_SYNC)
0687                 *interrupt_mask |= AR5K_INT_DTIM_SYNC;
0688             if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
0689                 *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
0690             if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
0691                 *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
0692         }
0693 
0694         /* Below interrupts are unlikely to happen */
0695 
0696         /* HIU = Host Interface Unit (PCI etc)
0697          * Can be one of MCABT, SSERR, DPERR from SISR2 */
0698         if (unlikely(pisr & (AR5K_ISR_HIUERR)))
0699             *interrupt_mask |= AR5K_INT_FATAL;
0700 
0701         /*Beacon Not Ready*/
0702         if (unlikely(pisr & (AR5K_ISR_BNR)))
0703             *interrupt_mask |= AR5K_INT_BNR;
0704 
0705         /* A queue got CBR overrun */
0706         if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
0707             *interrupt_mask |= AR5K_INT_QCBRORN;
0708 
0709         /* A queue got CBR underrun */
0710         if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
0711             *interrupt_mask |= AR5K_INT_QCBRURN;
0712 
0713         /* A queue got triggered */
0714         if (unlikely(pisr & (AR5K_ISR_QTRIG)))
0715             *interrupt_mask |= AR5K_INT_QTRIG;
0716 
0717         data = pisr;
0718     }
0719 
0720     /*
0721      * In case we didn't handle anything,
0722      * print the register value.
0723      */
0724     if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
0725         ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);
0726 
0727     return 0;
0728 }
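
/*
 * Usage sketch (not part of the driver): how an interrupt handler would use
 * the two calls above. Bail out early if the card has nothing pending,
 * otherwise read-and-clear the status and dispatch on the abstract
 * ath5k_int bits. The dispatch shown is illustrative only.
 */
static void example_isr(struct ath5k_hw *ah)
{
    enum ath5k_int status;

    if (!ath5k_hw_is_intr_pending(ah))
        return; /* shared irq, not for us */

    if (ath5k_hw_get_isr(ah, &status))
        return; /* -ENODEV, card is probably gone */

    if (status & AR5K_INT_FATAL) {
        /* schedule a full hw reset */
    }

    if (status & AR5K_INT_TX_ALL) {
        /* schedule tx completion processing; the queues that
         * triggered are flagged in ah->ah_txq_isr_txok_all */
    }
}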
0729 
0730 /**
0731  * ath5k_hw_set_imr() - Set interrupt mask
0732  * @ah: The &struct ath5k_hw
0733  * @new_mask: The new interrupt mask to be set
0734  *
0735  * Set the interrupt mask in hw to save interrupts. We do that by mapping
0736  * ath5k_int bits to hw-specific bits to remove abstraction and writing
0737  * Interrupt Mask Register.
0738  */
0739 enum ath5k_int
0740 ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
0741 {
0742     enum ath5k_int old_mask, int_mask;
0743 
0744     old_mask = ah->ah_imr;
0745 
0746     /*
0747      * Disable card interrupts to prevent any race conditions
0748      * (they will be re-enabled afterwards if AR5K_INT GLOBAL
0749      * is set again on the new mask).
0750      */
0751     if (old_mask & AR5K_INT_GLOBAL) {
0752         ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
0753         ath5k_hw_reg_read(ah, AR5K_IER);
0754     }
0755 
0756     /*
0757      * Add additional, chipset-dependent interrupt mask flags
0758      * and write them to the IMR (interrupt mask register).
0759      */
0760     int_mask = new_mask & AR5K_INT_COMMON;
0761 
0762     if (ah->ah_version != AR5K_AR5210) {
0763         /* Preserve per queue TXURN interrupt mask */
0764         u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
0765                 & AR5K_SIMR2_QCU_TXURN;
0766 
0767         /* Fatal interrupt abstraction for 5211+ */
0768         if (new_mask & AR5K_INT_FATAL) {
0769             int_mask |= AR5K_IMR_HIUERR;
0770             simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
0771                 | AR5K_SIMR2_DPERR);
0772         }
0773 
0774         /* Misc beacon related interrupts */
0775         if (new_mask & AR5K_INT_TIM)
0776             int_mask |= AR5K_IMR_TIM;
0777 
0778         if (new_mask & AR5K_INT_TIM)
0779             simr2 |= AR5K_SISR2_TIM;
0780         if (new_mask & AR5K_INT_DTIM)
0781             simr2 |= AR5K_SISR2_DTIM;
0782         if (new_mask & AR5K_INT_DTIM_SYNC)
0783             simr2 |= AR5K_SISR2_DTIM_SYNC;
0784         if (new_mask & AR5K_INT_BCN_TIMEOUT)
0785             simr2 |= AR5K_SISR2_BCN_TIMEOUT;
0786         if (new_mask & AR5K_INT_CAB_TIMEOUT)
0787             simr2 |= AR5K_SISR2_CAB_TIMEOUT;
0788 
0789         /*Beacon Not Ready*/
0790         if (new_mask & AR5K_INT_BNR)
0791             int_mask |= AR5K_INT_BNR;
0792 
0793         /* Note: Per queue interrupt masks
0794          * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
0795         ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
0796         ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
0797 
0798     } else {
0799         /* Fatal interrupt abstraction for 5210 */
0800         if (new_mask & AR5K_INT_FATAL)
0801             int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
0802                 | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
0803 
0804         /* Only common interrupts left for 5210 (no SIMRs) */
0805         ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
0806     }
0807 
0808     /* If RXNOFRM interrupt is masked disable it
0809      * by setting AR5K_RXNOFRM to zero */
0810     if (!(new_mask & AR5K_INT_RXNOFRM))
0811         ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);
0812 
0813     /* Store new interrupt mask */
0814     ah->ah_imr = new_mask;
0815 
0816     /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
0817     if (new_mask & AR5K_INT_GLOBAL) {
0818         ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
0819         ath5k_hw_reg_read(ah, AR5K_IER);
0820     }
0821 
0822     return old_mask;
0823 }
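
/*
 * Usage sketch (not part of the driver): since ath5k_hw_set_imr() returns
 * the previous mask, it can bracket a register sequence with card interrupts
 * disabled and then restored, exactly as ath5k_hw_update_tx_triglevel()
 * does above.
 */
static void example_with_ints_masked(struct ath5k_hw *ah)
{
    enum ath5k_int old_mask;

    /* Clear AR5K_INT_GLOBAL to disable card interrupts, keep the rest */
    old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

    /* ... touch registers that must not race the interrupt handler ... */

    /* Restore the previous mask (re-enables if GLOBAL was set) */
    ath5k_hw_set_imr(ah, old_mask);
}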
0824 
0825 
0826 /********************\
0827  Init/Stop functions
0828 \********************/
0829 
0830 /**
0831  * ath5k_hw_dma_init() - Initialize DMA unit
0832  * @ah: The &struct ath5k_hw
0833  *
0834  * Set DMA size and pre-enable interrupts
0835  * (driver handles tx/rx buffer setup and
0836  * dma start/stop)
0837  *
0838  * XXX: Save/restore RXDP/TXDP registers ?
0839  */
0840 void
0841 ath5k_hw_dma_init(struct ath5k_hw *ah)
0842 {
0843     /*
0844      * Set Rx/Tx DMA Configuration
0845      *
0846      * Set standard DMA size (128). Note that
0847      * a DMA size of 512 causes rx overruns and tx errors
0848      * on pci-e cards (tested on 5424 but since rx overruns
0849      * also occur on 5416/5418 with madwifi we set 128
0850      * for all PCI-E cards to be safe).
0851      *
0852      * XXX: need to check 5210 for this
0853      * TODO: Check out tx trigger level, it's always 64 on dumps but I
0854      * guess we can tweak it and see how it goes ;-)
0855      */
0856     if (ah->ah_version != AR5K_AR5210) {
0857         AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
0858             AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
0859         AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
0860             AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
0861     }
0862 
0863     /* Pre-enable interrupts on 5211/5212*/
0864     if (ah->ah_version != AR5K_AR5210)
0865         ath5k_hw_set_imr(ah, ah->ah_imr);
0866 
0867 }
0868 
0869 /**
0870  * ath5k_hw_dma_stop() - stop DMA unit
0871  * @ah: The &struct ath5k_hw
0872  *
0873  * Stop tx/rx DMA and interrupts. Returns
0874  * -EBUSY if tx or rx dma failed to stop.
0875  *
0876  * XXX: Sometimes DMA unit hangs and we have
0877  * stuck frames on tx queues, only a reset
0878  * can fix that.
0879  */
0880 int
0881 ath5k_hw_dma_stop(struct ath5k_hw *ah)
0882 {
0883     int i, qmax, err;
0884     err = 0;
0885 
0886     /* Disable interrupts */
0887     ath5k_hw_set_imr(ah, 0);
0888 
0889     /* Stop rx dma */
0890     err = ath5k_hw_stop_rx_dma(ah);
0891     if (err)
0892         return err;
0893 
0894     /* Clear any pending interrupts
0895      * and disable tx dma */
0896     if (ah->ah_version != AR5K_AR5210) {
0897         ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
0898         qmax = AR5K_NUM_TX_QUEUES;
0899     } else {
0900         /* PISR/SISR Not available on 5210 */
0901         ath5k_hw_reg_read(ah, AR5K_ISR);
0902         qmax = AR5K_NUM_TX_QUEUES_NOQCU;
0903     }
0904 
0905     for (i = 0; i < qmax; i++) {
0906         err = ath5k_hw_stop_tx_dma(ah, i);
0907         /* -EINVAL -> queue inactive */
0908         if (err && err != -EINVAL)
0909             return err;
0910     }
0911 
0912     return 0;
0913 }
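
/*
 * Usage sketch (not part of the driver): how a reset path might use
 * ath5k_hw_dma_stop(). Per the XXX note above, a hung DMA unit (-EBUSY)
 * can't be recovered here, so the caller just logs it and continues with
 * the chip reset that actually clears the stuck queues.
 */
static int example_prepare_for_reset(struct ath5k_hw *ah)
{
    int ret;

    ret = ath5k_hw_dma_stop(ah);
    if (ret == -EBUSY) {
        ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
                "DMA didn't stop cleanly, continuing with reset\n");
        ret = 0;
    }

    return ret;
}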