0001 /*
0002  * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
0003  *
0004  * Permission to use, copy, modify, and/or distribute this software for any
0005  * purpose with or without fee is hereby granted, provided that the above
0006  * copyright notice and this permission notice appear in all copies.
0007  *
0008  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
0009  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
0010  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
0011  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
0012  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
0013  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
0014  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
0015  */
0016 
0017 /* DXE - DMA transfer engine
0018  * We have 2 channels (high priority and low priority) for TX and 2 channels for RX.
0019  * Data packets are transferred through the low-priority channels,
0020  * management packets through the high-priority channels.
0021  */
0022 
0023 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0024 
0025 #include <linux/interrupt.h>
0026 #include <linux/soc/qcom/smem_state.h>
0027 #include "wcn36xx.h"
0028 #include "txrx.h"
0029 
0030 static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
0031 {
0032     wcn36xx_dbg(WCN36XX_DBG_DXE,
0033             "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
0034             addr, data);
0035 
0036     writel(data, wcn->ccu_base + addr);
0037 }
0038 
0039 static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
0040 {
0041     wcn36xx_dbg(WCN36XX_DBG_DXE,
0042             "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
0043             addr, data);
0044 
0045     writel(data, wcn->dxe_base + addr);
0046 }
0047 
0048 static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
0049 {
0050     *data = readl(wcn->dxe_base + addr);
0051 
0052     wcn36xx_dbg(WCN36XX_DBG_DXE,
0053             "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
0054             addr, *data);
0055 }
0056 
0057 static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
0058 {
0059     struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
0060     int i;
0061 
0062     for (i = 0; i < ch->desc_num && ctl; i++) {
0063         next = ctl->next;
0064         kfree(ctl);
0065         ctl = next;
0066     }
0067 }
0068 
0069 static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
0070 {
0071     struct wcn36xx_dxe_ctl *prev_ctl = NULL;
0072     struct wcn36xx_dxe_ctl *cur_ctl = NULL;
0073     int i;
0074 
0075     spin_lock_init(&ch->lock);
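         /* Note: the loop below links the control blocks into a circular,
          * singly linked list; the last block points back to the head so the
          * TX/RX paths can walk the ring without ever hitting an end.
          */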
0076     for (i = 0; i < ch->desc_num; i++) {
0077         cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
0078         if (!cur_ctl)
0079             goto out_fail;
0080 
0081         cur_ctl->ctl_blk_order = i;
0082         if (i == 0) {
0083             ch->head_blk_ctl = cur_ctl;
0084             ch->tail_blk_ctl = cur_ctl;
0085         } else if (ch->desc_num - 1 == i) {
0086             prev_ctl->next = cur_ctl;
0087             cur_ctl->next = ch->head_blk_ctl;
0088         } else {
0089             prev_ctl->next = cur_ctl;
0090         }
0091         prev_ctl = cur_ctl;
0092     }
0093 
0094     return 0;
0095 
0096 out_fail:
0097     wcn36xx_dxe_free_ctl_block(ch);
0098     return -ENOMEM;
0099 }
0100 
0101 int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
0102 {
0103     int ret;
0104 
0105     wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
0106     wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
0107     wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
0108     wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
0109 
0110     wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
0111     wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
0112     wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
0113     wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
0114 
0115     wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
0116     wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
0117 
0118     wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
0119     wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
0120 
0121     wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
0122     wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
0123 
0124     wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
0125     wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
0126 
0127     wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
0128     wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
0129 
0130     /* DXE control block allocation */
0131     ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
0132     if (ret)
0133         goto out_err;
0134     ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
0135     if (ret)
0136         goto out_err;
0137     ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
0138     if (ret)
0139         goto out_err;
0140     ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
0141     if (ret)
0142         goto out_err;
0143 
0144     /* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
0145     ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
0146                       WCN36XX_SMSM_WLAN_TX_ENABLE |
0147                       WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
0148                       WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
0149     if (ret)
0150         goto out_err;
0151 
0152     return 0;
0153 
0154 out_err:
0155     wcn36xx_err("Failed to allocate DXE control blocks\n");
0156     wcn36xx_dxe_free_ctl_blks(wcn);
0157     return -ENOMEM;
0158 }
0159 
0160 void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
0161 {
0162     wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
0163     wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
0164     wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
0165     wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
0166 }
0167 
0168 static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
0169 {
0170     struct wcn36xx_dxe_desc *cur_dxe = NULL;
0171     struct wcn36xx_dxe_desc *prev_dxe = NULL;
0172     struct wcn36xx_dxe_ctl *cur_ctl = NULL;
0173     size_t size;
0174     int i;
0175 
0176     size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
0177     wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
0178                           GFP_KERNEL);
0179     if (!wcn_ch->cpu_addr)
0180         return -ENOMEM;
0181 
0182     cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
0183     cur_ctl = wcn_ch->head_blk_ctl;
0184 
0185     for (i = 0; i < wcn_ch->desc_num; i++) {
0186         cur_ctl->desc = cur_dxe;
0187         cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
0188             i * sizeof(struct wcn36xx_dxe_desc);
0189 
0190         switch (wcn_ch->ch_type) {
0191         case WCN36XX_DXE_CH_TX_L:
0192             cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
0193             cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
0194             break;
0195         case WCN36XX_DXE_CH_TX_H:
0196             cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
0197             cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
0198             break;
0199         case WCN36XX_DXE_CH_RX_L:
0200             cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
0201             cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
0202             break;
0203         case WCN36XX_DXE_CH_RX_H:
0204             cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
0205             cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
0206             break;
0207         }
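             /* Link the descriptors into a ring: each descriptor's phy_next_l
              * points at the next descriptor's DMA address, and the last one
              * points back to the head descriptor.
              */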
0208         if (0 == i) {
0209             cur_dxe->phy_next_l = 0;
0210         } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
0211             prev_dxe->phy_next_l =
0212                 cur_ctl->desc_phy_addr;
0213         } else if (i == (wcn_ch->desc_num - 1)) {
0214             prev_dxe->phy_next_l =
0215                 cur_ctl->desc_phy_addr;
0216             cur_dxe->phy_next_l =
0217                 wcn_ch->head_blk_ctl->desc_phy_addr;
0218         }
0219         cur_ctl = cur_ctl->next;
0220         prev_dxe = cur_dxe;
0221         cur_dxe++;
0222     }
0223 
0224     return 0;
0225 }
0226 
0227 static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
0228 {
0229     size_t size;
0230 
0231     size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
0232     dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
0233 }
0234 
0235 static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
0236                    struct wcn36xx_dxe_mem_pool *pool)
0237 {
0238     int i, chunk_size = pool->chunk_size;
0239     dma_addr_t bd_phy_addr = pool->phy_addr;
0240     void *bd_cpu_addr = pool->virt_addr;
0241     struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
0242 
0243     for (i = 0; i < ch->desc_num; i++) {
0244         /* Only every second DXE descriptor needs a BD pointer;
0245          * the others point to the skb data. */
0246         if (!(i & 1)) {
0247             cur->bd_phy_addr = bd_phy_addr;
0248             cur->bd_cpu_addr = bd_cpu_addr;
0249             bd_phy_addr += chunk_size;
0250             bd_cpu_addr += chunk_size;
0251         } else {
0252             cur->bd_phy_addr = 0;
0253             cur->bd_cpu_addr = NULL;
0254         }
0255         cur = cur->next;
0256     }
0257 }
0258 
0259 static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
0260 {
0261     int reg_data = 0;
0262 
0263     wcn36xx_dxe_read_register(wcn,
0264                   WCN36XX_DXE_INT_MASK_REG,
0265                   &reg_data);
0266 
0267     reg_data |= wcn_ch;
0268 
0269     wcn36xx_dxe_write_register(wcn,
0270                    WCN36XX_DXE_INT_MASK_REG,
0271                    (int)reg_data);
0272     return 0;
0273 }
0274 
0275 static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
0276 {
0277     int reg_data = 0;
0278 
0279     wcn36xx_dxe_read_register(wcn,
0280                   WCN36XX_DXE_INT_MASK_REG,
0281                   &reg_data);
0282 
0283     reg_data &= ~wcn_ch;
0284 
0285     wcn36xx_dxe_write_register(wcn,
0286                    WCN36XX_DXE_INT_MASK_REG,
0287                    (int)reg_data);
0288 }
0289 
0290 static int wcn36xx_dxe_fill_skb(struct device *dev,
0291                 struct wcn36xx_dxe_ctl *ctl,
0292                 gfp_t gfp)
0293 {
0294     struct wcn36xx_dxe_desc *dxe = ctl->desc;
0295     struct sk_buff *skb;
0296 
0297     skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
0298     if (skb == NULL)
0299         return -ENOMEM;
0300 
0301     dxe->dst_addr_l = dma_map_single(dev,
0302                      skb_tail_pointer(skb),
0303                      WCN36XX_PKT_SIZE,
0304                      DMA_FROM_DEVICE);
0305     if (dma_mapping_error(dev, dxe->dst_addr_l)) {
0306         dev_err(dev, "unable to map skb\n");
0307         kfree_skb(skb);
0308         return -ENOMEM;
0309     }
0310     ctl->skb = skb;
0311 
0312     return 0;
0313 }
0314 
0315 static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
0316                     struct wcn36xx_dxe_ch *wcn_ch)
0317 {
0318     int i;
0319     struct wcn36xx_dxe_ctl *cur_ctl = NULL;
0320 
0321     cur_ctl = wcn_ch->head_blk_ctl;
0322 
0323     for (i = 0; i < wcn_ch->desc_num; i++) {
0324         wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
0325         cur_ctl = cur_ctl->next;
0326     }
0327 
0328     return 0;
0329 }
0330 
0331 static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
0332                      struct wcn36xx_dxe_ch *wcn_ch)
0333 {
0334     struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
0335     int i;
0336 
0337     for (i = 0; i < wcn_ch->desc_num; i++) {
0338         kfree_skb(cur->skb);
0339         cur = cur->next;
0340     }
0341 }
0342 
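     /* Completion path for frames that requested an explicit TX status
      * indication (see reap_tx_dxes()): report the parked wcn->tx_ack_skb to
      * mac80211 with or without the ACK flag and re-enable the queues.
      */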
0343 void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
0344 {
0345     struct ieee80211_tx_info *info;
0346     struct sk_buff *skb;
0347     unsigned long flags;
0348 
0349     spin_lock_irqsave(&wcn->dxe_lock, flags);
0350     skb = wcn->tx_ack_skb;
0351     wcn->tx_ack_skb = NULL;
0352     del_timer(&wcn->tx_ack_timer);
0353     spin_unlock_irqrestore(&wcn->dxe_lock, flags);
0354 
0355     if (!skb) {
0356         wcn36xx_warn("Spurious TX complete indication\n");
0357         return;
0358     }
0359 
0360     info = IEEE80211_SKB_CB(skb);
0361 
0362     if (status == 1)
0363         info->flags |= IEEE80211_TX_STAT_ACK;
0364     else
0365         info->flags &= ~IEEE80211_TX_STAT_ACK;
0366 
0367     wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
0368 
0369     ieee80211_tx_status_irqsafe(wcn->hw, skb);
0370     ieee80211_wake_queues(wcn->hw);
0371 }
0372 
0373 static void wcn36xx_dxe_tx_timer(struct timer_list *t)
0374 {
0375     struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
0376     struct ieee80211_tx_info *info;
0377     unsigned long flags;
0378     struct sk_buff *skb;
0379 
0380     /* TX Timeout */
0381     wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
0382 
0383     spin_lock_irqsave(&wcn->dxe_lock, flags);
0384     skb = wcn->tx_ack_skb;
0385     wcn->tx_ack_skb = NULL;
0386     spin_unlock_irqrestore(&wcn->dxe_lock, flags);
0387 
0388     if (!skb)
0389         return;
0390 
0391     info = IEEE80211_SKB_CB(skb);
0392     info->flags &= ~IEEE80211_TX_STAT_ACK;
0393     info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
0394 
0395     ieee80211_tx_status_irqsafe(wcn->hw, skb);
0396     ieee80211_wake_queues(wcn->hw);
0397 }
0398 
0399 static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
0400 {
0401     struct wcn36xx_dxe_ctl *ctl;
0402     struct ieee80211_tx_info *info;
0403     unsigned long flags;
0404 
0405     /*
0406      * Make at least one loop of do-while because in case ring is
0407      * completely full head and tail are pointing to the same element
0408      * and while-do will not make any cycles.
0409      */
0410     spin_lock_irqsave(&ch->lock, flags);
0411     ctl = ch->tail_blk_ctl;
0412     do {
0413         if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
0414             break;
0415 
0416         if (ctl->skb &&
0417             READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
0418             dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
0419                      ctl->skb->len, DMA_TO_DEVICE);
0420             info = IEEE80211_SKB_CB(ctl->skb);
0421             if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
0422                 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
0423                     info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
0424                     ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
0425                 } else {
0426                     /* Wait for the TX ack indication or timeout... */
0427                     spin_lock(&wcn->dxe_lock);
0428                     if (WARN_ON(wcn->tx_ack_skb))
0429                         ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
0430                     wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
0431                     mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
0432                     spin_unlock(&wcn->dxe_lock);
0433                 }
0434                 /* do not free, ownership transferred to mac80211 status cb */
0435             } else {
0436                 ieee80211_free_txskb(wcn->hw, ctl->skb);
0437             }
0438 
0439             if (wcn->queues_stopped) {
0440                 wcn->queues_stopped = false;
0441                 ieee80211_wake_queues(wcn->hw);
0442             }
0443 
0444             ctl->skb = NULL;
0445         }
0446         ctl = ctl->next;
0447     } while (ctl != ch->head_blk_ctl);
0448 
0449     ch->tail_blk_ctl = ctl;
0450     spin_unlock_irqrestore(&ch->lock, flags);
0451 }
0452 
0453 static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
0454 {
0455     struct wcn36xx *wcn = (struct wcn36xx *)dev;
0456     int int_src, int_reason;
0457 
0458     wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
0459 
0460     if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
0461         wcn36xx_dxe_read_register(wcn,
0462                       WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
0463                       &int_reason);
0464 
0465         wcn36xx_dxe_write_register(wcn,
0466                        WCN36XX_DXE_0_INT_CLR,
0467                        WCN36XX_INT_MASK_CHAN_TX_H);
0468 
0469         if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
0470             wcn36xx_dxe_write_register(wcn,
0471                            WCN36XX_DXE_0_INT_ERR_CLR,
0472                            WCN36XX_INT_MASK_CHAN_TX_H);
0473 
0474             wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
0475                     int_src);
0476         }
0477 
0478         if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
0479             wcn36xx_dxe_write_register(wcn,
0480                            WCN36XX_DXE_0_INT_DONE_CLR,
0481                            WCN36XX_INT_MASK_CHAN_TX_H);
0482         }
0483 
0484         if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
0485             wcn36xx_dxe_write_register(wcn,
0486                            WCN36XX_DXE_0_INT_ED_CLR,
0487                            WCN36XX_INT_MASK_CHAN_TX_H);
0488         }
0489 
0490         wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
0491                 int_reason);
0492 
0493         if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
0494                   WCN36XX_CH_STAT_INT_ED_MASK)) {
0495             reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
0496         }
0497     }
0498 
0499     if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
0500         wcn36xx_dxe_read_register(wcn,
0501                       WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
0502                       &int_reason);
0503 
0504         wcn36xx_dxe_write_register(wcn,
0505                        WCN36XX_DXE_0_INT_CLR,
0506                        WCN36XX_INT_MASK_CHAN_TX_L);
0507 
0508         if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
0509             wcn36xx_dxe_write_register(wcn,
0510                            WCN36XX_DXE_0_INT_ERR_CLR,
0511                            WCN36XX_INT_MASK_CHAN_TX_L);
0512 
0513             wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
0514                     int_src);
0515         }
0516 
0517         if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
0518             wcn36xx_dxe_write_register(wcn,
0519                            WCN36XX_DXE_0_INT_DONE_CLR,
0520                            WCN36XX_INT_MASK_CHAN_TX_L);
0521         }
0522 
0523         if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
0524             wcn36xx_dxe_write_register(wcn,
0525                            WCN36XX_DXE_0_INT_ED_CLR,
0526                            WCN36XX_INT_MASK_CHAN_TX_L);
0527         }
0528 
0529         wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
0530                 int_reason);
0531 
0532         if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
0533                   WCN36XX_CH_STAT_INT_ED_MASK)) {
0534             reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
0535         }
0536     }
0537 
0538     return IRQ_HANDLED;
0539 }
0540 
0541 static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
0542 {
0543     struct wcn36xx *wcn = (struct wcn36xx *)dev;
0544 
0545     wcn36xx_dxe_rx_frame(wcn);
0546 
0547     return IRQ_HANDLED;
0548 }
0549 
0550 static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
0551 {
0552     int ret;
0553 
0554     ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
0555               IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
0556     if (ret) {
0557         wcn36xx_err("failed to alloc tx irq\n");
0558         goto out_err;
0559     }
0560 
0561     ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
0562               "wcn36xx_rx", wcn);
0563     if (ret) {
0564         wcn36xx_err("failed to alloc rx irq\n");
0565         goto out_txirq;
0566     }
0567 
0568     enable_irq_wake(wcn->rx_irq);
0569 
0570     return 0;
0571 
0572 out_txirq:
0573     free_irq(wcn->tx_irq, wcn);
0574 out_err:
0575     return ret;
0576 
0577 }
0578 
0579 static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
0580                      struct wcn36xx_dxe_ch *ch,
0581                      u32 ctrl,
0582                      u32 en_mask,
0583                      u32 int_mask,
0584                      u32 status_reg)
0585 {
0586     struct wcn36xx_dxe_desc *dxe;
0587     struct wcn36xx_dxe_ctl *ctl;
0588     dma_addr_t  dma_addr;
0589     struct sk_buff *skb;
0590     u32 int_reason;
0591     int ret;
0592 
0593     wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
0594     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
0595 
0596     if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
0597         wcn36xx_dxe_write_register(wcn,
0598                        WCN36XX_DXE_0_INT_ERR_CLR,
0599                        int_mask);
0600 
0601         wcn36xx_err("DXE IRQ reported error on RX channel\n");
0602     }
0603 
0604     if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
0605         wcn36xx_dxe_write_register(wcn,
0606                        WCN36XX_DXE_0_INT_DONE_CLR,
0607                        int_mask);
0608 
0609     if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
0610         wcn36xx_dxe_write_register(wcn,
0611                        WCN36XX_DXE_0_INT_ED_CLR,
0612                        int_mask);
0613 
0614     if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
0615                 WCN36XX_CH_STAT_INT_ED_MASK)))
0616         return 0;
0617 
0618     spin_lock(&ch->lock);
0619 
0620     ctl = ch->head_blk_ctl;
0621     dxe = ctl->desc;
0622 
0623     while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
0624         /* do not read until we own DMA descriptor */
0625         dma_rmb();
0626 
0627         /* read/modify DMA descriptor */
0628         skb = ctl->skb;
0629         dma_addr = dxe->dst_addr_l;
0630         ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
0631         if (0 == ret) {
0632             /* new skb allocation ok. Use the new one and queue
0633              * the old one to network system.
0634              */
0635             dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
0636                     DMA_FROM_DEVICE);
0637             wcn36xx_rx_skb(wcn, skb);
0638         }
0639         /* else keep old skb not submitted and reuse it for rx DMA
0640          * (dropping the packet that it contained)
0641          */
0642 
0643         /* flush descriptor changes before re-marking as valid */
0644         dma_wmb();
0645         dxe->ctrl = ctrl;
0646 
0647         ctl = ctl->next;
0648         dxe = ctl->desc;
0649     }
0650     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
0651 
0652     ch->head_blk_ctl = ctl;
0653 
0654     spin_unlock(&ch->lock);
0655 
0656     return 0;
0657 }
0658 
0659 void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
0660 {
0661     int int_src;
0662 
0663     wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
0664 
0665     /* RX_LOW_PRI */
0666     if (int_src & WCN36XX_DXE_INT_CH1_MASK)
0667         wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
0668                       WCN36XX_DXE_CTRL_RX_L,
0669                       WCN36XX_DXE_INT_CH1_MASK,
0670                       WCN36XX_INT_MASK_CHAN_RX_L,
0671                       WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
0672 
0673     /* RX_HIGH_PRI */
0674     if (int_src & WCN36XX_DXE_INT_CH3_MASK)
0675         wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
0676                       WCN36XX_DXE_CTRL_RX_H,
0677                       WCN36XX_DXE_INT_CH3_MASK,
0678                       WCN36XX_INT_MASK_CHAN_RX_H,
0679                       WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
0680 
0681     if (!int_src)
0682         wcn36xx_warn("No DXE interrupt pending\n");
0683 }
0684 
0685 int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
0686 {
0687     size_t s;
0688     void *cpu_addr;
0689 
0690     /* Allocate BD headers for MGMT frames */
0691 
0692     /* Where this value comes from is unclear; ask QC */
0693     wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
0694         16 - (WCN36XX_BD_CHUNK_SIZE % 8);
0695 
0696     s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
0697     cpu_addr = dma_alloc_coherent(wcn->dev, s,
0698                       &wcn->mgmt_mem_pool.phy_addr,
0699                       GFP_KERNEL);
0700     if (!cpu_addr)
0701         goto out_err;
0702 
0703     wcn->mgmt_mem_pool.virt_addr = cpu_addr;
0704 
0705     /* Allocate BD headers for DATA frames */
0706 
0707     /* Where this value comes from is unclear; ask QC */
0708     wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
0709         16 - (WCN36XX_BD_CHUNK_SIZE % 8);
0710 
0711     s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
0712     cpu_addr = dma_alloc_coherent(wcn->dev, s,
0713                       &wcn->data_mem_pool.phy_addr,
0714                       GFP_KERNEL);
0715     if (!cpu_addr)
0716         goto out_err;
0717 
0718     wcn->data_mem_pool.virt_addr = cpu_addr;
0719 
0720     return 0;
0721 
0722 out_err:
0723     wcn36xx_dxe_free_mem_pools(wcn);
0724     wcn36xx_err("Failed to allocate BD mempool\n");
0725     return -ENOMEM;
0726 }
0727 
0728 void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
0729 {
0730     if (wcn->mgmt_mem_pool.virt_addr)
0731         dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
0732                   WCN36XX_DXE_CH_DESC_NUMB_TX_H,
0733                   wcn->mgmt_mem_pool.virt_addr,
0734                   wcn->mgmt_mem_pool.phy_addr);
0735 
0736     if (wcn->data_mem_pool.virt_addr) {
0737         dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
0738                   WCN36XX_DXE_CH_DESC_NUMB_TX_L,
0739                   wcn->data_mem_pool.virt_addr,
0740                   wcn->data_mem_pool.phy_addr);
0741     }
0742 }
0743 
0744 int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
0745              struct wcn36xx_vif *vif_priv,
0746              struct wcn36xx_tx_bd *bd,
0747              struct sk_buff *skb,
0748              bool is_low)
0749 {
0750     struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
0751     struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
0752     struct wcn36xx_dxe_ch *ch = NULL;
0753     unsigned long flags;
0754     int ret;
0755 
0756     ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
0757 
0758     spin_lock_irqsave(&ch->lock, flags);
0759     ctl_bd = ch->head_blk_ctl;
0760     ctl_skb = ctl_bd->next;
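         /* TX consumes control blocks in pairs: ctl_bd carries the buffer
          * descriptor (BD) header prepared below, and the following ctl_skb
          * carries the frame payload itself.
          */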
0761 
0762     /*
0763      * If skb is not null that means that we reached the tail of the ring
0764      * hence ring is full. Stop queues to let mac80211 back off until ring
0765      * has an empty slot again.
0766      */
0767     if (NULL != ctl_skb->skb) {
0768         ieee80211_stop_queues(wcn->hw);
0769         wcn->queues_stopped = true;
0770         spin_unlock_irqrestore(&ch->lock, flags);
0771         return -EBUSY;
0772     }
0773 
0774     if (unlikely(ctl_skb->bd_cpu_addr)) {
0775         wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
0776         ret = -EINVAL;
0777         goto unlock;
0778     }
0779 
0780     desc_bd = ctl_bd->desc;
0781     desc_skb = ctl_skb->desc;
0782 
0783     ctl_bd->skb = NULL;
0784 
0785     /* write buffer descriptor */
0786     memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
0787 
0788     /* Set source address of the BD we send */
0789     desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
0790     desc_bd->dst_addr_l = ch->dxe_wq;
0791     desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
0792 
0793     wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
0794 
0795     wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
0796              (char *)desc_bd, sizeof(*desc_bd));
0797     wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
0798              "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
0799              sizeof(struct wcn36xx_tx_bd));
0800 
0801     desc_skb->src_addr_l = dma_map_single(wcn->dev,
0802                           skb->data,
0803                           skb->len,
0804                           DMA_TO_DEVICE);
0805     if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
0806         dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
0807         ret = -ENOMEM;
0808         goto unlock;
0809     }
0810 
0811     ctl_skb->skb = skb;
0812     desc_skb->dst_addr_l = ch->dxe_wq;
0813     desc_skb->fr_len = ctl_skb->skb->len;
0814 
0815     wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
0816              (char *)desc_skb, sizeof(*desc_skb));
0817     wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
0818              (char *)ctl_skb->skb->data, ctl_skb->skb->len);
0819 
0820     /* Move the head of the ring to the next empty descriptor */
0821     ch->head_blk_ctl = ctl_skb->next;
0822 
0823     /* Commit all previous writes and set descriptors to VALID */
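         /* The skb descriptor is made valid before the BD descriptor so that,
          * once the engine sees a valid BD, the payload descriptor that follows
          * it is already valid as well (assumed ordering requirement of the
          * DXE engine).
          */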
0824     wmb();
0825     desc_skb->ctrl = ch->ctrl_skb;
0826     wmb();
0827     desc_bd->ctrl = ch->ctrl_bd;
0828 
0829     /*
0830      * When connected and trying to send data frame chip can be in sleep
0831      * mode and writing to the register will not wake up the chip. Instead
0832      * notify chip about new frame through SMSM bus.
0833      */
0834     if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
0835         qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
0836                         WCN36XX_SMSM_WLAN_TX_ENABLE,
0837                         WCN36XX_SMSM_WLAN_TX_ENABLE);
0838     } else {
0839         /* indicate End Of Packet and generate interrupt on descriptor
0840          * done.
0841          */
0842         wcn36xx_dxe_write_register(wcn,
0843             ch->reg_ctrl, ch->def_ctrl);
0844     }
0845 
0846     ret = 0;
0847 unlock:
0848     spin_unlock_irqrestore(&ch->lock, flags);
0849     return ret;
0850 }
0851 
0852 static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
0853 {
0854     unsigned long flags;
0855     struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
0856     struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
0857     bool ret = true;
0858 
0859     spin_lock_irqsave(&ch->lock, flags);
0860 
0861     /* Loop through ring buffer looking for nonempty entries. */
0862     ctl_bd_start = ch->head_blk_ctl;
0863     ctl_bd = ctl_bd_start;
0864     ctl_skb_start = ctl_bd_start->next;
0865     ctl_skb = ctl_skb_start;
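         /* Ring entries are used in BD/skb pairs, so advance two control
          * blocks per iteration; only the skb slots need checking for a
          * pending frame.
          */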
0866     do {
0867         if (ctl_skb->skb) {
0868             ret = false;
0869             goto unlock;
0870         }
0871         ctl_bd = ctl_skb->next;
0872         ctl_skb = ctl_bd->next;
0873     } while (ctl_skb != ctl_skb_start);
0874 
0875 unlock:
0876     spin_unlock_irqrestore(&ch->lock, flags);
0877     return ret;
0878 }
0879 
0880 int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
0881 {
0882     int i = 0;
0883 
0884     /* Called with mac80211 queues stopped. Wait for empty HW queues. */
0885     do {
0886         if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
0887             _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
0888             return 0;
0889         }
0890         /* This ieee80211_ops callback is specifically allowed to
0891          * sleep.
0892          */
0893         usleep_range(1000, 1100);
0894     } while (++i < 100);
0895 
0896     return -EBUSY;
0897 }
0898 
0899 int wcn36xx_dxe_init(struct wcn36xx *wcn)
0900 {
0901     int reg_data = 0, ret;
0902 
0903     reg_data = WCN36XX_DXE_REG_RESET;
0904     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
0905 
0906     /* Select channels for rx avail and xfer done interrupts... */
0907     reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
0908             WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
0909     if (wcn->is_pronto)
0910         wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
0911     else
0912         wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
0913 
0914     /***************************************/
0915     /* Init descriptors for TX LOW channel */
0916     /***************************************/
0917     ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
0918     if (ret) {
0919         dev_err(wcn->dev, "Error allocating descriptor\n");
0920         return ret;
0921     }
0922     wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
0923 
0924     /* Write channel head to a NEXT register */
0925     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
0926         wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
0927 
0928     /* Program DMA destination addr for TX LOW */
0929     wcn36xx_dxe_write_register(wcn,
0930         WCN36XX_DXE_CH_DEST_ADDR_TX_L,
0931         WCN36XX_DXE_WQ_TX_L);
0932 
0933     wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
0934 
0935     /***************************************/
0936     /* Init descriptors for TX HIGH channel */
0937     /***************************************/
0938     ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
0939     if (ret) {
0940         dev_err(wcn->dev, "Error allocating descriptor\n");
0941         goto out_err_txh_ch;
0942     }
0943 
0944     wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
0945 
0946     /* Write channel head to a NEXT register */
0947     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
0948         wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
0949 
0950     /* Program DMA destination addr for TX HIGH */
0951     wcn36xx_dxe_write_register(wcn,
0952         WCN36XX_DXE_CH_DEST_ADDR_TX_H,
0953         WCN36XX_DXE_WQ_TX_H);
0954 
0955     wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
0956 
0957     /***************************************/
0958     /* Init descriptors for RX LOW channel */
0959     /***************************************/
0960     ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
0961     if (ret) {
0962         dev_err(wcn->dev, "Error allocating descriptor\n");
0963         goto out_err_rxl_ch;
0964     }
0965 
0966     /* For RX we need to preallocate buffers */
0967     wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
0968 
0969     /* Write channel head to a NEXT register */
0970     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
0971         wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
0972 
0973     /* Write DMA source address */
0974     wcn36xx_dxe_write_register(wcn,
0975         WCN36XX_DXE_CH_SRC_ADDR_RX_L,
0976         WCN36XX_DXE_WQ_RX_L);
0977 
0978     /* Program preallocated destination address */
0979     wcn36xx_dxe_write_register(wcn,
0980         WCN36XX_DXE_CH_DEST_ADDR_RX_L,
0981         wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
0982 
0983     /* Enable default control registers */
0984     wcn36xx_dxe_write_register(wcn,
0985         WCN36XX_DXE_REG_CTL_RX_L,
0986         WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
0987 
0988     /***************************************/
0989     /* Init descriptors for RX HIGH channel */
0990     /***************************************/
0991     ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
0992     if (ret) {
0993         dev_err(wcn->dev, "Error allocating descriptor\n");
0994         goto out_err_rxh_ch;
0995     }
0996 
0997     /* For RX we need to preallocate buffers */
0998     wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
0999 
1000     /* Write channel head to a NEXT register */
1001     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
1002         wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
1003 
1004     /* Write DMA source address */
1005     wcn36xx_dxe_write_register(wcn,
1006         WCN36XX_DXE_CH_SRC_ADDR_RX_H,
1007         WCN36XX_DXE_WQ_RX_H);
1008 
1009     /* Program preallocated destination address */
1010     wcn36xx_dxe_write_register(wcn,
1011         WCN36XX_DXE_CH_DEST_ADDR_RX_H,
1012         wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
1013 
1014     /* Enable default control registers */
1015     wcn36xx_dxe_write_register(wcn,
1016         WCN36XX_DXE_REG_CTL_RX_H,
1017         WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
1018 
1019     ret = wcn36xx_dxe_request_irqs(wcn);
1020     if (ret < 0)
1021         goto out_err_irq;
1022 
1023     timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
1024 
1025     /* Enable channel interrupts */
1026     wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1027     wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1028     wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1029     wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1030 
1031     return 0;
1032 
1033 out_err_irq:
1034     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1035 out_err_rxh_ch:
1036     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1037 out_err_rxl_ch:
1038     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1039 out_err_txh_ch:
1040     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1041 
1042     return ret;
1043 }
1044 
1045 void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
1046 {
1047     int reg_data = 0;
1048 
1049     /* Disable channel interrupts */
1050     wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
1051     wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
1052     wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
1053     wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
1054 
1055     free_irq(wcn->tx_irq, wcn);
1056     free_irq(wcn->rx_irq, wcn);
1057     del_timer(&wcn->tx_ack_timer);
1058 
1059     if (wcn->tx_ack_skb) {
1060         ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
1061         wcn->tx_ack_skb = NULL;
1062     }
1063 
1064     /* Put the DXE block into reset before freeing memory */
1065     reg_data = WCN36XX_DXE_REG_RESET;
1066     wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
1067 
1068     wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
1069     wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
1070 
1071     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
1072     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
1073     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
1074     wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
1075 }