/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
                    struct ath9k_tx_queue_info *qi)
{
    ath_dbg(ath9k_hw_common(ah), INTERRUPT,
        "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
        ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
        ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
        ah->txurn_interrupt_mask);

    ENABLE_REGWRITE_BUFFER(ah);

    REG_WRITE(ah, AR_IMR_S0,
          SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
          | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
    REG_WRITE(ah, AR_IMR_S1,
          SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
          | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

    ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
    ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
    REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

    REGWRITE_BUFFER_FLUSH(ah);
}
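
/*
 * Illustrative note (editor's sketch, not driver code): the SM() helper
 * used above packs a value into a named register field. Assuming the
 * usual ath9k definition from hw.h, SM(v, AR_IMR_S0_QCU_TXOK) expands
 * roughly to:
 *
 *	((v) << AR_IMR_S0_QCU_TXOK_S) & AR_IMR_S0_QCU_TXOK
 *
 * so each per-queue bit in txok_interrupt_mask lands in the matching
 * QCU_TXOK bit of AR_IMR_S0.
 */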

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
    return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
    REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
    ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
    REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
    u32 npend;

    npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
    if (npend == 0) {
        if (REG_READ(ah, AR_Q_TXE) & (1 << q))
            npend = 1;
    }

    return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
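
/*
 * Usage sketch (editor's illustration, not part of this file): the
 * helpers above are typically used together to hand one descriptor
 * chain to a queue. The pattern below is modeled on the ath9k beacon
 * path; 'bf' and 'beaconq' are assumed names.
 *
 *	ath9k_hw_stop_dma_queue(ah, beaconq);
 *	ath9k_hw_puttxbuf(ah, beaconq, bf->bf_daddr);
 *	ath9k_hw_txstart(ah, beaconq);
 */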

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is set to 128 bytes,
 * the trigger level cannot exceed 6 * 64 = 384 bytes: there needs to be
 * enough space left in the TX FIFO for the requested transfer size, so
 * the FIFO can only fill up to 512 - 128 = 384 bytes. If we set the
 * threshold to a value beyond 6, the transmit will hang.
 *
 * Current dual   stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
    u32 txcfg, curLevel, newLevel;

    if (ah->tx_trig_level >= ah->config.max_txtrig_level)
        return false;

    ath9k_hw_disable_interrupts(ah);

    txcfg = REG_READ(ah, AR_TXCFG);
    curLevel = MS(txcfg, AR_FTRIG);
    newLevel = curLevel;
    if (bIncTrigLevel) {
        if (curLevel < ah->config.max_txtrig_level)
            newLevel++;
    } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
        newLevel--;
    if (newLevel != curLevel)
        REG_WRITE(ah, AR_TXCFG,
              (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

    ath9k_hw_enable_interrupts(ah);

    ah->tx_trig_level = newLevel;

    return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
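
/*
 * Usage sketch (editor's illustration, assumed from the common ath9k
 * interrupt path): on a TX underrun the driver bumps the trigger level
 * so more bytes are buffered before transmission starts.
 *
 *	if (status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 *
 * Worked example of the constraint documented above: with a 128-byte
 * DMA request size and a 512-byte FIFO, the highest safe level is
 * (512 - 128) / 64 = 6, matching the 6 * 64 = 384 bytes in the comment.
 */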

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
    int maxdelay = 1000;
    int i, q;

    if (ah->curchan) {
        if (IS_CHAN_HALF_RATE(ah->curchan))
            maxdelay *= 2;
        else if (IS_CHAN_QUARTER_RATE(ah->curchan))
            maxdelay *= 4;
    }

    REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

    REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
    REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
    REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

    for (q = 0; q < AR_NUM_QCU; q++) {
        for (i = 0; i < maxdelay; i++) {
            if (i)
                udelay(5);

            if (!ath9k_hw_numtxpending(ah, q))
                break;
        }
    }

    REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
    REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
    REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

    REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT   1000    /* usec */
#define ATH9K_TIME_QUANTUM      100     /* usec */
    int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
    int wait;

    REG_WRITE(ah, AR_Q_TXD, 1 << q);

    for (wait = wait_time; wait != 0; wait--) {
        if (wait != wait_time)
            udelay(ATH9K_TIME_QUANTUM);

        if (ath9k_hw_numtxpending(ah, q) == 0)
            break;
    }

    REG_WRITE(ah, AR_Q_TXD, 0);

    return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
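
/*
 * Usage sketch (editor's illustration, assumed from typical ath9k TX
 * drain logic): callers verify that the per-queue stop actually drained
 * the hardware and escalate to a full chip reset when it did not.
 *
 *	ath9k_hw_stop_dma_queue(ah, q);
 *	if (ath9k_hw_numtxpending(ah, q))
 *		needs_chip_reset = true;
 *
 * 'needs_chip_reset' is a hypothetical caller-side flag.
 */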

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
                const struct ath9k_tx_queue_info *qinfo)
{
    u32 cw;
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath9k_tx_queue_info *qi;

    qi = &ah->txq[q];
    if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
        ath_dbg(common, QUEUE,
            "Set TXQ properties, inactive queue: %u\n", q);
        return false;
    }

    ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);

    qi->tqi_ver = qinfo->tqi_ver;
    qi->tqi_subtype = qinfo->tqi_subtype;
    qi->tqi_qflags = qinfo->tqi_qflags;
    qi->tqi_priority = qinfo->tqi_priority;
    if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
        qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
    else
        qi->tqi_aifs = INIT_AIFS;
    if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
        cw = min(qinfo->tqi_cwmin, 1024U);
        qi->tqi_cwmin = 1;
        while (qi->tqi_cwmin < cw)
            qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
    } else
        qi->tqi_cwmin = qinfo->tqi_cwmin;
    if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
        cw = min(qinfo->tqi_cwmax, 1024U);
        qi->tqi_cwmax = 1;
        while (qi->tqi_cwmax < cw)
            qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
    } else
        qi->tqi_cwmax = INIT_CWMAX;

    if (qinfo->tqi_shretry != 0)
        qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
    else
        qi->tqi_shretry = INIT_SH_RETRY;
    if (qinfo->tqi_lgretry != 0)
        qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
    else
        qi->tqi_lgretry = INIT_LG_RETRY;
    qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
    qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
    qi->tqi_burstTime = qinfo->tqi_burstTime;
    qi->tqi_readyTime = qinfo->tqi_readyTime;

    switch (qinfo->tqi_subtype) {
    case ATH9K_WME_UPSD:
        if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
            qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
        break;
    default:
        break;
    }

    return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
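
/*
 * Worked example (editor's note): the loops above round a requested
 * contention window up to the nearest (2^n - 1). A request of
 * tqi_cwmin = 64 walks 1 -> 3 -> 7 -> 15 -> 31 -> 63 -> 127 and stops
 * at 127, the first value >= 64.
 */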

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
                struct ath9k_tx_queue_info *qinfo)
{
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath9k_tx_queue_info *qi;

    qi = &ah->txq[q];
    if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
        ath_dbg(common, QUEUE,
            "Get TXQ properties, inactive queue: %u\n", q);
        return false;
    }

    qinfo->tqi_ver = qi->tqi_ver;
    qinfo->tqi_subtype = qi->tqi_subtype;
    qinfo->tqi_qflags = qi->tqi_qflags;
    qinfo->tqi_priority = qi->tqi_priority;
    qinfo->tqi_aifs = qi->tqi_aifs;
    qinfo->tqi_cwmin = qi->tqi_cwmin;
    qinfo->tqi_cwmax = qi->tqi_cwmax;
    qinfo->tqi_shretry = qi->tqi_shretry;
    qinfo->tqi_lgretry = qi->tqi_lgretry;
    qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
    qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
    qinfo->tqi_burstTime = qi->tqi_burstTime;
    qinfo->tqi_readyTime = qi->tqi_readyTime;

    return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
              const struct ath9k_tx_queue_info *qinfo)
{
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath9k_tx_queue_info *qi;
    int q;

    switch (type) {
    case ATH9K_TX_QUEUE_BEACON:
        q = ATH9K_NUM_TX_QUEUES - 1;
        break;
    case ATH9K_TX_QUEUE_CAB:
        q = ATH9K_NUM_TX_QUEUES - 2;
        break;
    case ATH9K_TX_QUEUE_PSPOLL:
        q = 1;
        break;
    case ATH9K_TX_QUEUE_UAPSD:
        q = ATH9K_NUM_TX_QUEUES - 3;
        break;
    case ATH9K_TX_QUEUE_DATA:
        q = qinfo->tqi_subtype;
        break;
    default:
        ath_err(common, "Invalid TX queue type: %u\n", type);
        return -1;
    }

    ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);

    qi = &ah->txq[q];
    if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
        ath_err(common, "TX queue: %u already active\n", q);
        return -1;
    }
    memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
    qi->tqi_type = type;
    qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
    (void) ath9k_hw_set_txq_props(ah, q, qinfo);

    return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
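
/*
 * Usage sketch (editor's illustration, modeled on how ath9k sets up its
 * data queues; the field values are assumptions): defaults are requested
 * with ATH9K_TXQ_USEDEFAULT and the returned queue number is kept by the
 * caller.
 *
 *	struct ath9k_tx_queue_info qi;
 *	int qnum;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_subtype = subtype;
 *	qnum = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *
 * where 'subtype' would be the WMM access-category index.
 */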

static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
{
    ah->txok_interrupt_mask &= ~(1 << q);
    ah->txerr_interrupt_mask &= ~(1 << q);
    ah->txdesc_interrupt_mask &= ~(1 << q);
    ah->txeol_interrupt_mask &= ~(1 << q);
    ah->txurn_interrupt_mask &= ~(1 << q);
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath9k_tx_queue_info *qi;

    qi = &ah->txq[q];
    if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
        ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
        return false;
    }

    ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);

    qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
    ath9k_hw_clear_queue_interrupts(ah, q);
    ath9k_hw_set_txq_interrupts(ah, qi);

    return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
    struct ath_common *common = ath9k_hw_common(ah);
    struct ath9k_tx_queue_info *qi;
    u32 cwMin, chanCwMin, value;

    qi = &ah->txq[q];
    if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
        ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
        return true;
    }

    ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

    if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
        chanCwMin = INIT_CWMIN;

        for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
    } else
        cwMin = qi->tqi_cwmin;

    ENABLE_REGWRITE_BUFFER(ah);

    REG_WRITE(ah, AR_DLCL_IFS(q),
          SM(cwMin, AR_D_LCL_IFS_CWMIN) |
          SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
          SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

    REG_WRITE(ah, AR_DRETRY_LIMIT(q),
          SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
          SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
          SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

    REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

    if (AR_SREV_9340(ah) && !AR_SREV_9340_13_OR_LATER(ah))
        REG_WRITE(ah, AR_DMISC(q),
              AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
    else
        REG_WRITE(ah, AR_DMISC(q),
              AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

    if (qi->tqi_cbrPeriod) {
        REG_WRITE(ah, AR_QCBRCFG(q),
              SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
              SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
        REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
                (qi->tqi_cbrOverflowLimit ?
                 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
    }
    if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
        REG_WRITE(ah, AR_QRDYTIMECFG(q),
              SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
              AR_Q_RDYTIMECFG_EN);
    }

    REG_WRITE(ah, AR_DCHNTIME(q),
          SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
          (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

    if (qi->tqi_burstTime
        && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
        REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

    if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
        REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

    REGWRITE_BUFFER_FLUSH(ah);

    if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
        REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

    switch (qi->tqi_type) {
    case ATH9K_TX_QUEUE_BEACON:
        ENABLE_REGWRITE_BUFFER(ah);

        REG_SET_BIT(ah, AR_QMISC(q),
                AR_Q_MISC_FSP_DBA_GATED
                | AR_Q_MISC_BEACON_USE
                | AR_Q_MISC_CBR_INCR_DIS1);

        REG_SET_BIT(ah, AR_DMISC(q),
                (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
                | AR_D_MISC_BEACON_USE
                | AR_D_MISC_POST_FR_BKOFF_DIS);

        REGWRITE_BUFFER_FLUSH(ah);

        /*
         * cwmin and cwmax should be 0 for beacon queue
         * but not for IBSS as we would create an imbalance
         * on beaconing fairness for participating nodes.
         */
        if (AR_SREV_9300_20_OR_LATER(ah) &&
            ah->opmode != NL80211_IFTYPE_ADHOC) {
            REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
                  | SM(0, AR_D_LCL_IFS_CWMAX)
                  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
        }
        break;
    case ATH9K_TX_QUEUE_CAB:
        ENABLE_REGWRITE_BUFFER(ah);

        REG_SET_BIT(ah, AR_QMISC(q),
                AR_Q_MISC_FSP_DBA_GATED
                | AR_Q_MISC_CBR_INCR_DIS1
                | AR_Q_MISC_CBR_INCR_DIS0);
        value = (qi->tqi_readyTime -
             (ah->config.sw_beacon_response_time -
              ah->config.dma_beacon_response_time)) * 1024;
        REG_WRITE(ah, AR_QRDYTIMECFG(q),
              value | AR_Q_RDYTIMECFG_EN);
        REG_SET_BIT(ah, AR_DMISC(q),
                (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
                 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

        REGWRITE_BUFFER_FLUSH(ah);

        break;
    case ATH9K_TX_QUEUE_PSPOLL:
        REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
        break;
    case ATH9K_TX_QUEUE_UAPSD:
        REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
        break;
    default:
        break;
    }

    if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
        REG_SET_BIT(ah, AR_DMISC(q),
                SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
                   AR_D_MISC_ARB_LOCKOUT_CNTRL) |
                AR_D_MISC_POST_FR_BKOFF_DIS);
    }

    if (AR_SREV_9300_20_OR_LATER(ah))
        REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

    ath9k_hw_clear_queue_interrupts(ah, q);
    if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
        ah->txok_interrupt_mask |= 1 << q;
        ah->txerr_interrupt_mask |= 1 << q;
    }
    if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
        ah->txdesc_interrupt_mask |= 1 << q;
    if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
        ah->txeol_interrupt_mask |= 1 << q;
    if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
        ah->txurn_interrupt_mask |= 1 << q;
    ath9k_hw_set_txq_interrupts(ah, qi);

    return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
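
/*
 * Usage sketch (editor's illustration, assumed from the ath9k reset
 * path): after a channel change or chip reset, every configured queue
 * is re-programmed from its cached ath9k_tx_queue_info.
 *
 *	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 *		ath9k_hw_resettxqueue(ah, i);
 */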

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
            struct ath_rx_status *rs)
{
    struct ar5416_desc ads;
    struct ar5416_desc *adsp = AR5416DESC(ds);
    u32 phyerr;

    if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
        return -EINPROGRESS;

    ads.u.rx = adsp->u.rx;

    rs->rs_status = 0;
    rs->rs_flags = 0;
    rs->enc_flags = 0;
    rs->bw = RATE_INFO_BW_20;

    rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
    rs->rs_tstamp = ads.AR_RcvTimestamp;

    if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
        rs->rs_rssi = ATH9K_RSSI_BAD;
        rs->rs_rssi_ctl[0] = ATH9K_RSSI_BAD;
        rs->rs_rssi_ctl[1] = ATH9K_RSSI_BAD;
        rs->rs_rssi_ctl[2] = ATH9K_RSSI_BAD;
        rs->rs_rssi_ext[0] = ATH9K_RSSI_BAD;
        rs->rs_rssi_ext[1] = ATH9K_RSSI_BAD;
        rs->rs_rssi_ext[2] = ATH9K_RSSI_BAD;
    } else {
        rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
        rs->rs_rssi_ctl[0] = MS(ads.ds_rxstatus0,
                        AR_RxRSSIAnt00);
        rs->rs_rssi_ctl[1] = MS(ads.ds_rxstatus0,
                        AR_RxRSSIAnt01);
        rs->rs_rssi_ctl[2] = MS(ads.ds_rxstatus0,
                        AR_RxRSSIAnt02);
        rs->rs_rssi_ext[0] = MS(ads.ds_rxstatus4,
                        AR_RxRSSIAnt10);
        rs->rs_rssi_ext[1] = MS(ads.ds_rxstatus4,
                        AR_RxRSSIAnt11);
        rs->rs_rssi_ext[2] = MS(ads.ds_rxstatus4,
                        AR_RxRSSIAnt12);
    }
    if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
        rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
    else
        rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

    rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
    rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

    rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
    rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
    rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
    rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);

    /* directly mapped flags for ieee80211_rx_status */
    rs->enc_flags |=
        (ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
    rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 :
                        RATE_INFO_BW_20;
    if (AR_SREV_9280_20_OR_LATER(ah))
        rs->enc_flags |=
            (ads.ds_rxstatus3 & AR_STBC) ?
                /* we can only receive Nss=1 STBC */
                (1 << RX_ENC_FLAG_STBC_SHIFT) : 0;

    if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
        rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
    if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
        rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
    if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
        rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

    if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
        /*
         * Treat these errors as mutually exclusive to avoid spurious
         * extra error reports from the hardware. If a CRC error is
         * reported, then decryption and MIC errors are irrelevant,
         * the frame is going to be dropped either way.
         */
        if (ads.ds_rxstatus8 & AR_PHYErr) {
            rs->rs_status |= ATH9K_RXERR_PHY;
            phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
            rs->rs_phyerr = phyerr;
        } else if (ads.ds_rxstatus8 & AR_CRCErr)
            rs->rs_status |= ATH9K_RXERR_CRC;
        else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
            rs->rs_status |= ATH9K_RXERR_DECRYPT;
        else if (ads.ds_rxstatus8 & AR_MichaelErr)
            rs->rs_status |= ATH9K_RXERR_MIC;
    } else {
        if (ads.ds_rxstatus8 &
            (AR_CRCErr | AR_PHYErr | AR_DecryptCRCErr | AR_MichaelErr))
            rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;

        /* Only rates up to MCS15 are valid, everything above is invalid */
        if (rs->rs_rate >= 0x90)
            rs->rs_status |= ATH9K_RXERR_CORRUPT_DESC;
    }

    if (ads.ds_rxstatus8 & AR_KeyMiss)
        rs->rs_status |= ATH9K_RXERR_KEYMISS;

    return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
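
/*
 * Usage sketch (editor's illustration, modeled on a typical ath9k RX
 * loop; 'ds' is the current descriptor): a return of -EINPROGRESS means
 * the hardware still owns the descriptor and processing should stop.
 *
 *	struct ath_rx_status rs;
 *
 *	memset(&rs, 0, sizeof(rs));
 *	if (ath9k_hw_rxprocdesc(ah, ds, &rs) == -EINPROGRESS)
 *		break;
 *
 * On a 0 return, rs has been filled in and rs.rs_status carries any
 * ATH9K_RXERR_* bits for the frame.
 */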

/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
    u32 reg;

    if (set) {
        REG_SET_BIT(ah, AR_DIAG_SW,
                (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

        if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
                   0, AH_WAIT_TIMEOUT)) {
            REG_CLR_BIT(ah, AR_DIAG_SW,
                    (AR_DIAG_RX_DIS |
                     AR_DIAG_RX_ABORT));

            reg = REG_READ(ah, AR_OBS_BUS_1);
            ath_err(ath9k_hw_common(ah),
                "RX failed to go idle in 10 ms RXSM=0x%x\n",
                reg);

            return false;
        }
    } else {
        REG_CLR_BIT(ah, AR_DIAG_SW,
                (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
    }

    return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
    REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
    ath9k_enable_mib_counters(ah);

    ath9k_ani_reset(ah, is_scanning);

    REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
    REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

    ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
    struct ath_common *common = ath9k_hw_common(ah);
    u32 mac_status, last_mac_status = 0;
    int i;

    /* Enable access to the DMA observation bus */
    REG_WRITE(ah, AR_MACMISC,
          ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
           (AR_MACMISC_MISC_OBS_BUS_1 <<
            AR_MACMISC_MISC_OBS_BUS_MSB_S)));

    REG_WRITE(ah, AR_CR, AR_CR_RXD);

    /* Wait for rx enable bit to go low */
    for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
        if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
            break;

        if (!AR_SREV_9300_20_OR_LATER(ah)) {
            mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
            if (mac_status == 0x1c0 && mac_status == last_mac_status) {
                *reset = true;
                break;
            }

            last_mac_status = mac_status;
        }

        udelay(AH_TIME_QUANTUM);
    }

    if (i == 0) {
        ath_err(common,
            "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
            AH_RX_STOP_DMA_TIMEOUT / 1000,
            REG_READ(ah, AR_CR),
            REG_READ(ah, AR_DIAG_SW),
            REG_READ(ah, AR_DMADBG_7));
        return false;
    } else {
        return true;
    }

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
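
/*
 * Usage sketch (editor's illustration, assumed from the usual ath9k RX
 * stop sequence): the PCU is aborted first so no new frames start, then
 * RX DMA is stopped; if the DMA engine is wedged, 'reset' asks the
 * caller to perform a full chip reset.
 *
 *	bool reset = false;
 *
 *	ath9k_hw_abortpcurecv(ah);
 *	if (!ath9k_hw_stopdmarecv(ah, &reset) || reset)
 *		...schedule a chip reset...
 */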

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
    struct ath9k_tx_queue_info qi;

    memset(&qi, 0, sizeof(qi));
    qi.tqi_aifs = 1;
    qi.tqi_cwmin = 0;
    qi.tqi_cwmax = 0;

    if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
        qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;

    return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
    u32 host_isr;

    if (AR_SREV_9100(ah))
        return true;

    host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

    if (((host_isr & AR_INTR_MAC_IRQ) ||
         (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
        (host_isr != AR_INTR_SPURIOUS))
        return true;

    host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
    if ((host_isr & AR_INTR_SYNC_DEFAULT)
        && (host_isr != AR_INTR_SPURIOUS))
        return true;

    return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_kill_interrupts(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    ath_dbg(common, INTERRUPT, "disable IER\n");
    REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
    (void) REG_READ(ah, AR_IER);
    if (!AR_SREV_9100(ah)) {
        REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
        (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

        REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
        (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
    }
}
EXPORT_SYMBOL(ath9k_hw_kill_interrupts);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
    if (!(ah->imask & ATH9K_INT_GLOBAL))
        atomic_set(&ah->intr_ref_cnt, -1);
    else
        atomic_dec(&ah->intr_ref_cnt);

    ath9k_hw_kill_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);
    u32 sync_default = AR_INTR_SYNC_DEFAULT;
    u32 async_mask;

    if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
        AR_SREV_9561(ah))
        sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

    async_mask = AR_INTR_MAC_IRQ;

    if (ah->imask & ATH9K_INT_MCI)
        async_mask |= AR_INTR_ASYNC_MASK_MCI;

    ath_dbg(common, INTERRUPT, "enable IER\n");
    REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
    if (!AR_SREV_9100(ah)) {
        REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
        REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

        REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
        REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
    }
    ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
        REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));

    if (ah->msi_enabled) {
        u32 _msi_reg = 0;
        u32 i = 0;
        u32 msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;

        ath_dbg(ath9k_hw_common(ah), INTERRUPT,
            "Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);

        REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
        REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
        ath_dbg(ath9k_hw_common(ah), INTERRUPT,
            "AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
            REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
            REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));

        if (ah->msi_reg == 0)
            ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);

        ath_dbg(ath9k_hw_common(ah), INTERRUPT,
            "AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
            AR_PCIE_MSI, ah->msi_reg);

        i = 0;
        do {
            REG_WRITE(ah, AR_PCIE_MSI,
                  (ah->msi_reg | AR_PCIE_MSI_ENABLE)
                  & msi_pend_addr_mask);
            _msi_reg = REG_READ(ah, AR_PCIE_MSI);
            i++;
        } while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);

        if (i >= 200)
            ath_err(ath9k_hw_common(ah),
                "%s: _msi_reg = 0x%X\n",
                __func__, _msi_reg);
    }
}

void ath9k_hw_resume_interrupts(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    if (!(ah->imask & ATH9K_INT_GLOBAL))
        return;

    if (atomic_read(&ah->intr_ref_cnt) != 0) {
        ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
            atomic_read(&ah->intr_ref_cnt));
        return;
    }

    __ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_resume_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
    struct ath_common *common = ath9k_hw_common(ah);

    if (!(ah->imask & ATH9K_INT_GLOBAL))
        return;

    if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
        ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
            atomic_read(&ah->intr_ref_cnt));
        return;
    }

    __ath9k_hw_enable_interrupts(ah);
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
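
/*
 * Usage sketch (editor's note): disable/enable calls are reference
 * counted through ah->intr_ref_cnt, so nested critical sections pair up
 * and the IER is only re-armed by the outermost enable.
 *
 *	ath9k_hw_disable_interrupts(ah);
 *	...touch interrupt-sensitive registers...
 *	ath9k_hw_enable_interrupts(ah);
 */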

void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
    enum ath9k_int ints = ah->imask;
    u32 mask, mask2;
    struct ath9k_hw_capabilities *pCap = &ah->caps;
    struct ath_common *common = ath9k_hw_common(ah);

    if (!(ints & ATH9K_INT_GLOBAL))
        ath9k_hw_disable_interrupts(ah);

    if (ah->msi_enabled) {
        ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");

        REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
        REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
    }

    ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);

    mask = ints & ATH9K_INT_COMMON;
    mask2 = 0;

    ah->msi_mask = 0;
    if (ints & ATH9K_INT_TX) {
        ah->msi_mask |= AR_INTR_PRIO_TX;
        if (ah->config.tx_intr_mitigation)
            mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
        else {
            if (ah->txok_interrupt_mask)
                mask |= AR_IMR_TXOK;
            if (ah->txdesc_interrupt_mask)
                mask |= AR_IMR_TXDESC;
        }
        if (ah->txerr_interrupt_mask)
            mask |= AR_IMR_TXERR;
        if (ah->txeol_interrupt_mask)
            mask |= AR_IMR_TXEOL;
    }
    if (ints & ATH9K_INT_RX) {
        ah->msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
        if (AR_SREV_9300_20_OR_LATER(ah)) {
            mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
            if (ah->config.rx_intr_mitigation) {
                mask &= ~AR_IMR_RXOK_LP;
                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
            } else {
                mask |= AR_IMR_RXOK_LP;
            }
        } else {
            if (ah->config.rx_intr_mitigation)
                mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
            else
                mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
        }
        if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
            mask |= AR_IMR_GENTMR;
    }

    if (ints & ATH9K_INT_GENTIMER)
        mask |= AR_IMR_GENTMR;

    if (ints & (ATH9K_INT_BMISC)) {
        mask |= AR_IMR_BCNMISC;
        if (ints & ATH9K_INT_TIM)
            mask2 |= AR_IMR_S2_TIM;
        if (ints & ATH9K_INT_DTIM)
            mask2 |= AR_IMR_S2_DTIM;
        if (ints & ATH9K_INT_DTIMSYNC)
            mask2 |= AR_IMR_S2_DTIMSYNC;
        if (ints & ATH9K_INT_CABEND)
            mask2 |= AR_IMR_S2_CABEND;
        if (ints & ATH9K_INT_TSFOOR)
            mask2 |= AR_IMR_S2_TSFOOR;
    }

    if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
        mask |= AR_IMR_BCNMISC;
        if (ints & ATH9K_INT_GTT)
            mask2 |= AR_IMR_S2_GTT;
        if (ints & ATH9K_INT_CST)
            mask2 |= AR_IMR_S2_CST;
    }

    if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
        if (ints & ATH9K_INT_BB_WATCHDOG) {
            mask |= AR_IMR_BCNMISC;
            mask2 |= AR_IMR_S2_BB_WATCHDOG;
        }
    }

    ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
    REG_WRITE(ah, AR_IMR, mask);
    ah->imrs2_reg &= ~(AR_IMR_S2_TIM |
               AR_IMR_S2_DTIM |
               AR_IMR_S2_DTIMSYNC |
               AR_IMR_S2_CABEND |
               AR_IMR_S2_CABTO |
               AR_IMR_S2_TSFOOR |
               AR_IMR_S2_GTT |
               AR_IMR_S2_CST);

    if (ah->config.hw_hang_checks & HW_BB_WATCHDOG) {
        if (ints & ATH9K_INT_BB_WATCHDOG)
            ah->imrs2_reg &= ~AR_IMR_S2_BB_WATCHDOG;
    }

    ah->imrs2_reg |= mask2;
    REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

    if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
        if (ints & ATH9K_INT_TIM_TIMER)
            REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        else
            REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
    }
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
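
/*
 * Usage sketch (editor's illustration; the exact flag set is an
 * assumption modeled on the ath9k start path): the driver builds
 * ah->imask first, then programs and re-arms the masks.
 *
 *	ah->imask = ATH9K_INT_TX | ATH9K_INT_RX |
 *		    ATH9K_INT_RXEOL | ATH9K_INT_RXORN |
 *		    ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
 *	ath9k_hw_set_interrupts(ah);
 *	ath9k_hw_enable_interrupts(ah);
 */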

#define ATH9K_HW_MAX_DCU       10
#define ATH9K_HW_SLICE_PER_DCU 16
#define ATH9K_HW_BIT_IN_SLICE  16
void ath9k_hw_set_tx_filter(struct ath_hw *ah, u8 destidx, bool set)
{
    int dcu_idx;
    u32 filter;

    for (dcu_idx = 0; dcu_idx < ATH9K_HW_MAX_DCU; dcu_idx++) {
        filter = SM(set, AR_D_TXBLK_WRITE_COMMAND);
        filter |= SM(dcu_idx, AR_D_TXBLK_WRITE_DCU);
        filter |= SM((destidx / ATH9K_HW_SLICE_PER_DCU),
                 AR_D_TXBLK_WRITE_SLICE);
        filter |= BIT(destidx % ATH9K_HW_BIT_IN_SLICE);
        ath_dbg(ath9k_hw_common(ah), PS,
            "DCU%d staid %d set %d txfilter %08x\n",
            dcu_idx, destidx, set, filter);
        REG_WRITE(ah, AR_D_TXBLK_BASE, filter);
    }
}
EXPORT_SYMBOL(ath9k_hw_set_tx_filter);
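
/*
 * Worked example (editor's note): with ATH9K_HW_SLICE_PER_DCU = 16 and
 * ATH9K_HW_BIT_IN_SLICE = 16, a station index of destidx = 21 selects
 * slice 21 / 16 = 1 and bit 21 % 16 = 5, so the loop programs bit 5 of
 * slice 1 in the TX filter of every DCU.
 */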