0001 // SPDX-License-Identifier: ISC
0002 /*
0003  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
0004  */
0005 
0006 #include <linux/bits.h>
0007 #include <linux/clk.h>
0008 #include <linux/kernel.h>
0009 #include <linux/module.h>
0010 #include <linux/of.h>
0011 #include <linux/of_device.h>
0012 #include <linux/platform_device.h>
0013 #include <linux/property.h>
0014 #include <linux/regulator/consumer.h>
0015 #include <linux/remoteproc/qcom_rproc.h>
0016 #include <linux/of_address.h>
0017 #include <linux/iommu.h>
0018 
0019 #include "ce.h"
0020 #include "coredump.h"
0021 #include "debug.h"
0022 #include "hif.h"
0023 #include "htc.h"
0024 #include "snoc.h"
0025 
0026 #define ATH10K_SNOC_RX_POST_RETRY_MS 50
0027 #define CE_POLL_PIPE 4
0028 #define ATH10K_SNOC_WAKE_IRQ 2
0029 
0030 static char *const ce_name[] = {
0031     "WLAN_CE_0",
0032     "WLAN_CE_1",
0033     "WLAN_CE_2",
0034     "WLAN_CE_3",
0035     "WLAN_CE_4",
0036     "WLAN_CE_5",
0037     "WLAN_CE_6",
0038     "WLAN_CE_7",
0039     "WLAN_CE_8",
0040     "WLAN_CE_9",
0041     "WLAN_CE_10",
0042     "WLAN_CE_11",
0043 };
0044 
0045 static const char * const ath10k_regulators[] = {
0046     "vdd-0.8-cx-mx",
0047     "vdd-1.8-xo",
0048     "vdd-1.3-rfa",
0049     "vdd-3.3-ch0",
0050     "vdd-3.3-ch1",
0051 };
0052 
0053 static const char * const ath10k_clocks[] = {
0054     "cxo_ref_clk_pin", "qdss",
0055 };
0056 
0057 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
0058 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
0059 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
0060 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
0061 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
0062 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
0063 
0064 static const struct ath10k_snoc_drv_priv drv_priv = {
0065     .hw_rev = ATH10K_HW_WCN3990,
0066     .dma_mask = DMA_BIT_MASK(35),
0067     .msa_size = 0x100000,
0068 };
0069 
0070 #define WCN3990_SRC_WR_IDX_OFFSET 0x3C
0071 #define WCN3990_DST_WR_IDX_OFFSET 0x40
0072 
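     /* Per-CE shadow register offsets for the ring write indices.  The
      * table is handed to the firmware as part of the QMI WLAN enable
      * configuration (see ath10k_snoc_wlan_enable() below).
      */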
0073 static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
0074         {
0075             .ce_id = __cpu_to_le16(0),
0076             .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
0077         },
0078 
0079         {
0080             .ce_id = __cpu_to_le16(3),
0081             .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
0082         },
0083 
0084         {
0085             .ce_id = __cpu_to_le16(4),
0086             .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
0087         },
0088 
0089         {
0090             .ce_id = __cpu_to_le16(5),
0091             .reg_offset =  __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
0092         },
0093 
0094         {
0095             .ce_id = __cpu_to_le16(7),
0096             .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
0097         },
0098 
0099         {
0100             .ce_id = __cpu_to_le16(1),
0101             .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0102         },
0103 
0104         {
0105             .ce_id = __cpu_to_le16(2),
0106             .reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0107         },
0108 
0109         {
0110             .ce_id = __cpu_to_le16(7),
0111             .reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0112         },
0113 
0114         {
0115             .ce_id = __cpu_to_le16(8),
0116             .reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0117         },
0118 
0119         {
0120             .ce_id = __cpu_to_le16(9),
0121             .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0122         },
0123 
0124         {
0125             .ce_id = __cpu_to_le16(10),
0126             .reg_offset =  __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0127         },
0128 
0129         {
0130             .ce_id = __cpu_to_le16(11),
0131             .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
0132         },
0133 };
0134 
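     /* Host-side copy engine attributes for the twelve WCN3990 CE
      * pipes: ring sizes, maximum buffer size and the send/receive
      * completion callbacks used by the HTC/HTT/pktlog paths.
      */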
0135 static struct ce_attr host_ce_config_wlan[] = {
0136     /* CE0: host->target HTC control streams */
0137     {
0138         .flags = CE_ATTR_FLAGS,
0139         .src_nentries = 16,
0140         .src_sz_max = 2048,
0141         .dest_nentries = 0,
0142         .send_cb = ath10k_snoc_htc_tx_cb,
0143     },
0144 
0145     /* CE1: target->host HTT + HTC control */
0146     {
0147         .flags = CE_ATTR_FLAGS,
0148         .src_nentries = 0,
0149         .src_sz_max = 2048,
0150         .dest_nentries = 512,
0151         .recv_cb = ath10k_snoc_htt_htc_rx_cb,
0152     },
0153 
0154     /* CE2: target->host WMI */
0155     {
0156         .flags = CE_ATTR_FLAGS,
0157         .src_nentries = 0,
0158         .src_sz_max = 2048,
0159         .dest_nentries = 64,
0160         .recv_cb = ath10k_snoc_htc_rx_cb,
0161     },
0162 
0163     /* CE3: host->target WMI */
0164     {
0165         .flags = CE_ATTR_FLAGS,
0166         .src_nentries = 32,
0167         .src_sz_max = 2048,
0168         .dest_nentries = 0,
0169         .send_cb = ath10k_snoc_htc_tx_cb,
0170     },
0171 
0172     /* CE4: host->target HTT */
0173     {
0174         .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
0175         .src_nentries = 2048,
0176         .src_sz_max = 256,
0177         .dest_nentries = 0,
0178         .send_cb = ath10k_snoc_htt_tx_cb,
0179     },
0180 
0181     /* CE5: target->host HTT (ipa_uc->target ) */
0182     {
0183         .flags = CE_ATTR_FLAGS,
0184         .src_nentries = 0,
0185         .src_sz_max = 512,
0186         .dest_nentries = 512,
0187         .recv_cb = ath10k_snoc_htt_rx_cb,
0188     },
0189 
0190     /* CE6: target autonomous hif_memcpy */
0191     {
0192         .flags = CE_ATTR_FLAGS,
0193         .src_nentries = 0,
0194         .src_sz_max = 0,
0195         .dest_nentries = 0,
0196     },
0197 
0198     /* CE7: ce_diag, the Diagnostic Window */
0199     {
0200         .flags = CE_ATTR_FLAGS,
0201         .src_nentries = 2,
0202         .src_sz_max = 2048,
0203         .dest_nentries = 2,
0204     },
0205 
0206     /* CE8: Target to uMC */
0207     {
0208         .flags = CE_ATTR_FLAGS,
0209         .src_nentries = 0,
0210         .src_sz_max = 2048,
0211         .dest_nentries = 128,
0212     },
0213 
0214     /* CE9 target->host HTT */
0215     {
0216         .flags = CE_ATTR_FLAGS,
0217         .src_nentries = 0,
0218         .src_sz_max = 2048,
0219         .dest_nentries = 512,
0220         .recv_cb = ath10k_snoc_htt_htc_rx_cb,
0221     },
0222 
0223     /* CE10: target->host HTT */
0224     {
0225         .flags = CE_ATTR_FLAGS,
0226         .src_nentries = 0,
0227         .src_sz_max = 2048,
0228         .dest_nentries = 512,
0229         .recv_cb = ath10k_snoc_htt_htc_rx_cb,
0230     },
0231 
0232     /* CE11: target -> host PKTLOG */
0233     {
0234         .flags = CE_ATTR_FLAGS,
0235         .src_nentries = 0,
0236         .src_sz_max = 2048,
0237         .dest_nentries = 512,
0238         .recv_cb = ath10k_snoc_pktlog_rx_cb,
0239     },
0240 };
0241 
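     /* Target-side pipe configuration.  It is not programmed directly
      * by the host; ath10k_snoc_wlan_enable() copies it into the QMI
      * WLAN enable request so the firmware configures its end of each
      * pipe to match.
      */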
0242 static struct ce_pipe_config target_ce_config_wlan[] = {
0243     /* CE0: host->target HTC control and raw streams */
0244     {
0245         .pipenum = __cpu_to_le32(0),
0246         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0247         .nentries = __cpu_to_le32(32),
0248         .nbytes_max = __cpu_to_le32(2048),
0249         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0250         .reserved = __cpu_to_le32(0),
0251     },
0252 
0253     /* CE1: target->host HTT + HTC control */
0254     {
0255         .pipenum = __cpu_to_le32(1),
0256         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0257         .nentries = __cpu_to_le32(32),
0258         .nbytes_max = __cpu_to_le32(2048),
0259         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0260         .reserved = __cpu_to_le32(0),
0261     },
0262 
0263     /* CE2: target->host WMI */
0264     {
0265         .pipenum = __cpu_to_le32(2),
0266         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0267         .nentries = __cpu_to_le32(64),
0268         .nbytes_max = __cpu_to_le32(2048),
0269         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0270         .reserved = __cpu_to_le32(0),
0271     },
0272 
0273     /* CE3: host->target WMI */
0274     {
0275         .pipenum = __cpu_to_le32(3),
0276         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0277         .nentries = __cpu_to_le32(32),
0278         .nbytes_max = __cpu_to_le32(2048),
0279         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0280         .reserved = __cpu_to_le32(0),
0281     },
0282 
0283     /* CE4: host->target HTT */
0284     {
0285         .pipenum = __cpu_to_le32(4),
0286         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0287         .nentries = __cpu_to_le32(256),
0288         .nbytes_max = __cpu_to_le32(256),
0289         .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
0290         .reserved = __cpu_to_le32(0),
0291     },
0292 
0293     /* CE5: target->host HTT (HIF->HTT) */
0294     {
0295         .pipenum = __cpu_to_le32(5),
0296         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0297         .nentries = __cpu_to_le32(1024),
0298         .nbytes_max = __cpu_to_le32(64),
0299         .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
0300         .reserved = __cpu_to_le32(0),
0301     },
0302 
0303     /* CE6: Reserved for target autonomous hif_memcpy */
0304     {
0305         .pipenum = __cpu_to_le32(6),
0306         .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
0307         .nentries = __cpu_to_le32(32),
0308         .nbytes_max = __cpu_to_le32(16384),
0309         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0310         .reserved = __cpu_to_le32(0),
0311     },
0312 
0313     /* CE7 used only by Host */
0314     {
0315         .pipenum = __cpu_to_le32(7),
0316         .pipedir = __cpu_to_le32(4),
0317         .nentries = __cpu_to_le32(0),
0318         .nbytes_max = __cpu_to_le32(0),
0319         .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
0320         .reserved = __cpu_to_le32(0),
0321     },
0322 
0323     /* CE8 Target to uMC */
0324     {
0325         .pipenum = __cpu_to_le32(8),
0326         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0327         .nentries = __cpu_to_le32(32),
0328         .nbytes_max = __cpu_to_le32(2048),
0329         .flags = __cpu_to_le32(0),
0330         .reserved = __cpu_to_le32(0),
0331     },
0332 
0333     /* CE9 target->host HTT */
0334     {
0335         .pipenum = __cpu_to_le32(9),
0336         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0337         .nentries = __cpu_to_le32(32),
0338         .nbytes_max = __cpu_to_le32(2048),
0339         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0340         .reserved = __cpu_to_le32(0),
0341     },
0342 
0343     /* CE10 target->host HTT */
0344     {
0345         .pipenum = __cpu_to_le32(10),
0346         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0347         .nentries = __cpu_to_le32(32),
0348         .nbytes_max = __cpu_to_le32(2048),
0349         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0350         .reserved = __cpu_to_le32(0),
0351     },
0352 
0353     /* CE11 target autonomous qcache memcpy */
0354     {
0355         .pipenum = __cpu_to_le32(11),
0356         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0357         .nentries = __cpu_to_le32(32),
0358         .nbytes_max = __cpu_to_le32(2048),
0359         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0360         .reserved = __cpu_to_le32(0),
0361     },
0362 };
0363 
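     /* HTC service id -> CE pipe mapping.  Most services have both an
      * OUT (host->target) and an IN (target->host) entry; the table is
      * terminated by an all-zero record.
      */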
0364 static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
0365     {
0366         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
0367         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0368         __cpu_to_le32(3),
0369     },
0370     {
0371         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
0372         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0373         __cpu_to_le32(2),
0374     },
0375     {
0376         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
0377         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0378         __cpu_to_le32(3),
0379     },
0380     {
0381         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
0382         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0383         __cpu_to_le32(2),
0384     },
0385     {
0386         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
0387         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0388         __cpu_to_le32(3),
0389     },
0390     {
0391         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
0392         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0393         __cpu_to_le32(2),
0394     },
0395     {
0396         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
0397         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0398         __cpu_to_le32(3),
0399     },
0400     {
0401         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
0402         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0403         __cpu_to_le32(2),
0404     },
0405     {
0406         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
0407         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0408         __cpu_to_le32(3),
0409     },
0410     {
0411         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
0412         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0413         __cpu_to_le32(2),
0414     },
0415     {
0416         __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
0417         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0418         __cpu_to_le32(0),
0419     },
0420     {
0421         __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
0422         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0423         __cpu_to_le32(2),
0424     },
0425     { /* not used */
0426         __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
0427         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0428         __cpu_to_le32(0),
0429     },
0430     { /* not used */
0431         __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
0432         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0433         __cpu_to_le32(2),
0434     },
0435     {
0436         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
0437         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0438         __cpu_to_le32(4),
0439     },
0440     {
0441         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
0442         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0443         __cpu_to_le32(1),
0444     },
0445     { /* not used */
0446         __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
0447         __cpu_to_le32(PIPEDIR_OUT),
0448         __cpu_to_le32(5),
0449     },
0450     { /* in = DL = target -> host */
0451         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
0452         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0453         __cpu_to_le32(9),
0454     },
0455     { /* in = DL = target -> host */
0456         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
0457         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0458         __cpu_to_le32(10),
0459     },
0460     { /* in = DL = target -> host pktlog */
0461         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
0462         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0463         __cpu_to_le32(11),
0464     },
0465     /* (Additions here) */
0466 
0467     { /* must be last */
0468         __cpu_to_le32(0),
0469         __cpu_to_le32(0),
0470         __cpu_to_le32(0),
0471     },
0472 };
0473 
0474 static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
0475 {
0476     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0477 
0478     iowrite32(value, ar_snoc->mem + offset);
0479 }
0480 
0481 static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
0482 {
0483     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0484     u32 val;
0485 
0486     val = ioread32(ar_snoc->mem + offset);
0487 
0488     return val;
0489 }
0490 
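     /* Allocate an skb, DMA-map it for the device and post it to the
      * CE destination ring.  Takes ce->ce_lock internally, so the
      * caller must not hold it.
      */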
0491 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
0492 {
0493     struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
0494     struct ath10k *ar = pipe->hif_ce_state;
0495     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0496     struct sk_buff *skb;
0497     dma_addr_t paddr;
0498     int ret;
0499 
0500     skb = dev_alloc_skb(pipe->buf_sz);
0501     if (!skb)
0502         return -ENOMEM;
0503 
0504     WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
0505 
0506     paddr = dma_map_single(ar->dev, skb->data,
0507                    skb->len + skb_tailroom(skb),
0508                    DMA_FROM_DEVICE);
0509     if (unlikely(dma_mapping_error(ar->dev, paddr))) {
0510         ath10k_warn(ar, "failed to dma map snoc rx buf\n");
0511         dev_kfree_skb_any(skb);
0512         return -EIO;
0513     }
0514 
0515     ATH10K_SKB_RXCB(skb)->paddr = paddr;
0516 
0517     spin_lock_bh(&ce->ce_lock);
0518     ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
0519     spin_unlock_bh(&ce->ce_lock);
0520     if (ret) {
0521         dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
0522                  DMA_FROM_DEVICE);
0523         dev_kfree_skb_any(skb);
0524         return ret;
0525     }
0526 
0527     return 0;
0528 }
0529 
0530 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
0531 {
0532     struct ath10k *ar = pipe->hif_ce_state;
0533     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0534     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0535     struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
0536     int ret, num;
0537 
0538     if (pipe->buf_sz == 0)
0539         return;
0540 
0541     if (!ce_pipe->dest_ring)
0542         return;
0543 
0544     spin_lock_bh(&ce->ce_lock);
0545     num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
0546     spin_unlock_bh(&ce->ce_lock);
0547     while (num--) {
0548         ret = __ath10k_snoc_rx_post_buf(pipe);
0549         if (ret) {
0550             if (ret == -ENOSPC)
0551                 break;
0552             ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
0553             mod_timer(&ar_snoc->rx_post_retry, jiffies +
0554                   ATH10K_SNOC_RX_POST_RETRY_MS);
0555             break;
0556         }
0557     }
0558 }
0559 
0560 static void ath10k_snoc_rx_post(struct ath10k *ar)
0561 {
0562     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0563     int i;
0564 
0565     for (i = 0; i < CE_COUNT; i++)
0566         ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
0567 }
0568 
0569 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
0570                       void (*callback)(struct ath10k *ar,
0571                                struct sk_buff *skb))
0572 {
0573     struct ath10k *ar = ce_state->ar;
0574     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0575     struct ath10k_snoc_pipe *pipe_info =  &ar_snoc->pipe_info[ce_state->id];
0576     struct sk_buff *skb;
0577     struct sk_buff_head list;
0578     void *transfer_context;
0579     unsigned int nbytes, max_nbytes;
0580 
0581     __skb_queue_head_init(&list);
0582     while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
0583                          &nbytes) == 0) {
0584         skb = transfer_context;
0585         max_nbytes = skb->len + skb_tailroom(skb);
0586         dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
0587                  max_nbytes, DMA_FROM_DEVICE);
0588 
0589         if (unlikely(max_nbytes < nbytes)) {
0590             ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
0591                     nbytes, max_nbytes);
0592             dev_kfree_skb_any(skb);
0593             continue;
0594         }
0595 
0596         skb_put(skb, nbytes);
0597         __skb_queue_tail(&list, skb);
0598     }
0599 
0600     while ((skb = __skb_dequeue(&list))) {
0601         ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
0602                ce_state->id, skb->len);
0603 
0604         callback(ar, skb);
0605     }
0606 
0607     ath10k_snoc_rx_post_pipe(pipe_info);
0608 }
0609 
0610 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
0611 {
0612     ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
0613 }
0614 
0615 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
0616 {
0617     /* CE4 polling needs to be done whenever CE pipe which transports
0618      * HTT Rx (target->host) is processed.
0619      */
0620     ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
0621 
0622     ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
0623 }
0624 
0625 /* Called by the lower (CE) layer when data is received from the Target.
0626  * WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
0627  */
0628 static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
0629 {
0630     ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
0631 }
0632 
0633 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
0634 {
0635     skb_pull(skb, sizeof(struct ath10k_htc_hdr));
0636     ath10k_htt_t2h_msg_handler(ar, skb);
0637 }
0638 
0639 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
0640 {
0641     ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
0642     ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
0643 }
0644 
0645 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
0646 {
0647     struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
0648     struct ath10k *ar = ar_snoc->ar;
0649 
0650     ath10k_snoc_rx_post(ar);
0651 }
0652 
0653 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
0654 {
0655     struct ath10k *ar = ce_state->ar;
0656     struct sk_buff_head list;
0657     struct sk_buff *skb;
0658 
0659     __skb_queue_head_init(&list);
0660     while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
0661         if (!skb)
0662             continue;
0663 
0664         __skb_queue_tail(&list, skb);
0665     }
0666 
0667     while ((skb = __skb_dequeue(&list)))
0668         ath10k_htc_tx_completion_handler(ar, skb);
0669 }
0670 
0671 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
0672 {
0673     struct ath10k *ar = ce_state->ar;
0674     struct sk_buff *skb;
0675 
0676     while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
0677         if (!skb)
0678             continue;
0679 
0680         dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
0681                  skb->len, DMA_TO_DEVICE);
0682         ath10k_htt_hif_tx_complete(ar, skb);
0683     }
0684 }
0685 
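     /* Queue a scatter-gather list of items on a CE pipe.  All but the
      * last item are sent with CE_SEND_FLAG_GATHER; on failure the
      * descriptors already queued in this call are reverted.
      */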
0686 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
0687                  struct ath10k_hif_sg_item *items, int n_items)
0688 {
0689     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0690     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0691     struct ath10k_snoc_pipe *snoc_pipe;
0692     struct ath10k_ce_pipe *ce_pipe;
0693     int err, i = 0;
0694 
0695     snoc_pipe = &ar_snoc->pipe_info[pipe_id];
0696     ce_pipe = snoc_pipe->ce_hdl;
0697     spin_lock_bh(&ce->ce_lock);
0698 
0699     for (i = 0; i < n_items - 1; i++) {
0700         ath10k_dbg(ar, ATH10K_DBG_SNOC,
0701                "snoc tx item %d paddr %pad len %d n_items %d\n",
0702                i, &items[i].paddr, items[i].len, n_items);
0703 
0704         err = ath10k_ce_send_nolock(ce_pipe,
0705                         items[i].transfer_context,
0706                         items[i].paddr,
0707                         items[i].len,
0708                         items[i].transfer_id,
0709                         CE_SEND_FLAG_GATHER);
0710         if (err)
0711             goto err;
0712     }
0713 
0714     ath10k_dbg(ar, ATH10K_DBG_SNOC,
0715            "snoc tx item %d paddr %pad len %d n_items %d\n",
0716            i, &items[i].paddr, items[i].len, n_items);
0717 
0718     err = ath10k_ce_send_nolock(ce_pipe,
0719                     items[i].transfer_context,
0720                     items[i].paddr,
0721                     items[i].len,
0722                     items[i].transfer_id,
0723                     0);
0724     if (err)
0725         goto err;
0726 
0727     spin_unlock_bh(&ce->ce_lock);
0728 
0729     return 0;
0730 
0731 err:
0732     for (; i > 0; i--)
0733         __ath10k_ce_send_revert(ce_pipe);
0734 
0735     spin_unlock_bh(&ce->ce_lock);
0736     return err;
0737 }
0738 
0739 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
0740                        struct bmi_target_info *target_info)
0741 {
0742     target_info->version = ATH10K_HW_WCN3990;
0743     target_info->type = ATH10K_HW_WCN3990;
0744 
0745     return 0;
0746 }
0747 
0748 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
0749 {
0750     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0751 
0752     ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
0753 
0754     return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
0755 }
0756 
0757 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
0758                         int force)
0759 {
0760     int resources;
0761 
0762     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
0763 
0764     if (!force) {
0765         resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
0766 
0767         if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
0768             return;
0769     }
0770     ath10k_ce_per_engine_service(ar, pipe);
0771 }
0772 
0773 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
0774                            u16 service_id,
0775                            u8 *ul_pipe, u8 *dl_pipe)
0776 {
0777     const struct ce_service_to_pipe *entry;
0778     bool ul_set = false, dl_set = false;
0779     int i;
0780 
0781     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
0782 
0783     for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
0784         entry = &target_service_to_ce_map_wlan[i];
0785 
0786         if (__le32_to_cpu(entry->service_id) != service_id)
0787             continue;
0788 
0789         switch (__le32_to_cpu(entry->pipedir)) {
0790         case PIPEDIR_NONE:
0791             break;
0792         case PIPEDIR_IN:
0793             WARN_ON(dl_set);
0794             *dl_pipe = __le32_to_cpu(entry->pipenum);
0795             dl_set = true;
0796             break;
0797         case PIPEDIR_OUT:
0798             WARN_ON(ul_set);
0799             *ul_pipe = __le32_to_cpu(entry->pipenum);
0800             ul_set = true;
0801             break;
0802         case PIPEDIR_INOUT:
0803             WARN_ON(dl_set);
0804             WARN_ON(ul_set);
0805             *dl_pipe = __le32_to_cpu(entry->pipenum);
0806             *ul_pipe = __le32_to_cpu(entry->pipenum);
0807             dl_set = true;
0808             ul_set = true;
0809             break;
0810         }
0811     }
0812 
0813     if (!ul_set || !dl_set)
0814         return -ENOENT;
0815 
0816     return 0;
0817 }
0818 
0819 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
0820                          u8 *ul_pipe, u8 *dl_pipe)
0821 {
0822     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
0823 
0824     (void)ath10k_snoc_hif_map_service_to_pipe(ar,
0825                          ATH10K_HTC_SVC_ID_RSVD_CTRL,
0826                          ul_pipe, dl_pipe);
0827 }
0828 
0829 static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
0830 {
0831     ath10k_ce_disable_interrupts(ar);
0832 }
0833 
0834 static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
0835 {
0836     ath10k_ce_enable_interrupts(ar);
0837 }
0838 
0839 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
0840 {
0841     struct ath10k_ce_pipe *ce_pipe;
0842     struct ath10k_ce_ring *ce_ring;
0843     struct sk_buff *skb;
0844     struct ath10k *ar;
0845     int i;
0846 
0847     ar = snoc_pipe->hif_ce_state;
0848     ce_pipe = snoc_pipe->ce_hdl;
0849     ce_ring = ce_pipe->dest_ring;
0850 
0851     if (!ce_ring)
0852         return;
0853 
0854     if (!snoc_pipe->buf_sz)
0855         return;
0856 
0857     for (i = 0; i < ce_ring->nentries; i++) {
0858         skb = ce_ring->per_transfer_context[i];
0859         if (!skb)
0860             continue;
0861 
0862         ce_ring->per_transfer_context[i] = NULL;
0863 
0864         dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
0865                  skb->len + skb_tailroom(skb),
0866                  DMA_FROM_DEVICE);
0867         dev_kfree_skb_any(skb);
0868     }
0869 }
0870 
0871 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
0872 {
0873     struct ath10k_ce_pipe *ce_pipe;
0874     struct ath10k_ce_ring *ce_ring;
0875     struct sk_buff *skb;
0876     struct ath10k *ar;
0877     int i;
0878 
0879     ar = snoc_pipe->hif_ce_state;
0880     ce_pipe = snoc_pipe->ce_hdl;
0881     ce_ring = ce_pipe->src_ring;
0882 
0883     if (!ce_ring)
0884         return;
0885 
0886     if (!snoc_pipe->buf_sz)
0887         return;
0888 
0889     for (i = 0; i < ce_ring->nentries; i++) {
0890         skb = ce_ring->per_transfer_context[i];
0891         if (!skb)
0892             continue;
0893 
0894         ce_ring->per_transfer_context[i] = NULL;
0895 
0896         ath10k_htc_tx_completion_handler(ar, skb);
0897     }
0898 }
0899 
0900 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
0901 {
0902     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0903     struct ath10k_snoc_pipe *pipe_info;
0904     int pipe_num;
0905 
0906     del_timer_sync(&ar_snoc->rx_post_retry);
0907     for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
0908         pipe_info = &ar_snoc->pipe_info[pipe_num];
0909         ath10k_snoc_rx_pipe_cleanup(pipe_info);
0910         ath10k_snoc_tx_pipe_cleanup(pipe_info);
0911     }
0912 }
0913 
0914 static void ath10k_snoc_hif_stop(struct ath10k *ar)
0915 {
0916     if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
0917         ath10k_snoc_irq_disable(ar);
0918 
0919     ath10k_core_napi_sync_disable(ar);
0920     ath10k_snoc_buffer_cleanup(ar);
0921     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
0922 }
0923 
0924 static int ath10k_snoc_hif_start(struct ath10k *ar)
0925 {
0926     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
0927 
0928     bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
0929 
0930     ath10k_core_napi_enable(ar);
0931     ath10k_snoc_irq_enable(ar);
0932     ath10k_snoc_rx_post(ar);
0933 
0934     clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
0935 
0936     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
0937 
0938     return 0;
0939 }
0940 
0941 static int ath10k_snoc_init_pipes(struct ath10k *ar)
0942 {
0943     int i, ret;
0944 
0945     for (i = 0; i < CE_COUNT; i++) {
0946         ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
0947         if (ret) {
0948             ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
0949                    i, ret);
0950             return ret;
0951         }
0952     }
0953 
0954     return 0;
0955 }
0956 
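     /* Build the QMI WLAN enable configuration from the target CE,
      * service-to-pipe and shadow register tables above, translate
      * fw_mode into a wlfw driver mode and send it to the firmware.
      */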
0957 static int ath10k_snoc_wlan_enable(struct ath10k *ar,
0958                    enum ath10k_firmware_mode fw_mode)
0959 {
0960     struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
0961     struct ath10k_qmi_wlan_enable_cfg cfg;
0962     enum wlfw_driver_mode_enum_v01 mode;
0963     int pipe_num;
0964 
0965     for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
0966         tgt_cfg[pipe_num].pipe_num =
0967                 target_ce_config_wlan[pipe_num].pipenum;
0968         tgt_cfg[pipe_num].pipe_dir =
0969                 target_ce_config_wlan[pipe_num].pipedir;
0970         tgt_cfg[pipe_num].nentries =
0971                 target_ce_config_wlan[pipe_num].nentries;
0972         tgt_cfg[pipe_num].nbytes_max =
0973                 target_ce_config_wlan[pipe_num].nbytes_max;
0974         tgt_cfg[pipe_num].flags =
0975                 target_ce_config_wlan[pipe_num].flags;
0976         tgt_cfg[pipe_num].reserved = 0;
0977     }
0978 
0979     cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
0980                 sizeof(struct ath10k_tgt_pipe_cfg);
0981     cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
0982         &tgt_cfg;
0983     cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
0984                   sizeof(struct ath10k_svc_pipe_cfg);
0985     cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
0986         &target_service_to_ce_map_wlan;
0987     cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
0988     cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
0989         &target_shadow_reg_cfg_map;
0990 
0991     switch (fw_mode) {
0992     case ATH10K_FIRMWARE_MODE_NORMAL:
0993         mode = QMI_WLFW_MISSION_V01;
0994         break;
0995     case ATH10K_FIRMWARE_MODE_UTF:
0996         mode = QMI_WLFW_FTM_V01;
0997         break;
0998     default:
0999         ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
1000         return -EINVAL;
1001     }
1002 
1003     return ath10k_qmi_wlan_enable(ar, &cfg, mode,
1004                        NULL);
1005 }
1006 
1007 static int ath10k_hw_power_on(struct ath10k *ar)
1008 {
1009     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1010     int ret;
1011 
1012     ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1013 
1014     ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
1015     if (ret)
1016         return ret;
1017 
1018     ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
1019     if (ret)
1020         goto vreg_off;
1021 
1022     return ret;
1023 
1024 vreg_off:
1025     regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1026     return ret;
1027 }
1028 
1029 static int ath10k_hw_power_off(struct ath10k *ar)
1030 {
1031     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1032 
1033     ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1034 
1035     clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
1036 
1037     return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
1038 }
1039 
1040 static void ath10k_snoc_wlan_disable(struct ath10k *ar)
1041 {
1042     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1043 
1044     /* If ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY are not
1045      * both set, the driver has restarted because of a crash injected
1046      * via debugfs. In that case the driver needs to restart the
1047      * firmware, so send the QMI WLAN disable request during the driver
1048      * restart sequence.
1049      */
1050     if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
1051         !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
1052         ath10k_qmi_wlan_disable(ar);
1053 }
1054 
1055 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
1056 {
1057     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1058 
1059     ath10k_snoc_wlan_disable(ar);
1060     ath10k_ce_free_rri(ar);
1061     ath10k_hw_power_off(ar);
1062 }
1063 
1064 static int ath10k_snoc_hif_power_up(struct ath10k *ar,
1065                     enum ath10k_firmware_mode fw_mode)
1066 {
1067     int ret;
1068 
1069     ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
1070            __func__, ar->state);
1071 
1072     ret = ath10k_hw_power_on(ar);
1073     if (ret) {
1074         ath10k_err(ar, "failed to power on device: %d\n", ret);
1075         return ret;
1076     }
1077 
1078     ret = ath10k_snoc_wlan_enable(ar, fw_mode);
1079     if (ret) {
1080         ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
1081         goto err_hw_power_off;
1082     }
1083 
1084     ath10k_ce_alloc_rri(ar);
1085 
1086     ret = ath10k_snoc_init_pipes(ar);
1087     if (ret) {
1088         ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1089         goto err_free_rri;
1090     }
1091 
1092     return 0;
1093 
1094 err_free_rri:
1095     ath10k_ce_free_rri(ar);
1096     ath10k_snoc_wlan_disable(ar);
1097 
1098 err_hw_power_off:
1099     ath10k_hw_power_off(ar);
1100 
1101     return ret;
1102 }
1103 
1104 static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
1105                            u8 fw_log_mode)
1106 {
1107     u8 fw_dbg_mode;
1108 
1109     if (fw_log_mode)
1110         fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
1111     else
1112         fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
1113 
1114     return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
1115 }
1116 
1117 #ifdef CONFIG_PM
1118 static int ath10k_snoc_hif_suspend(struct ath10k *ar)
1119 {
1120     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1121     int ret;
1122 
1123     if (!device_may_wakeup(ar->dev))
1124         return -EPERM;
1125 
1126     ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1127     if (ret) {
1128         ath10k_err(ar, "failed to enable wakeup irq: %d\n", ret);
1129         return ret;
1130     }
1131 
1132     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
1133 
1134     return ret;
1135 }
1136 
1137 static int ath10k_snoc_hif_resume(struct ath10k *ar)
1138 {
1139     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1140     int ret;
1141 
1142     if (!device_may_wakeup(ar->dev))
1143         return -EPERM;
1144 
1145     ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
1146     if (ret) {
1147         ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
1148         return ret;
1149     }
1150 
1151     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
1152 
1153     return ret;
1154 }
1155 #endif
1156 
1157 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
1158     .read32     = ath10k_snoc_read32,
1159     .write32    = ath10k_snoc_write32,
1160     .start      = ath10k_snoc_hif_start,
1161     .stop       = ath10k_snoc_hif_stop,
1162     .map_service_to_pipe    = ath10k_snoc_hif_map_service_to_pipe,
1163     .get_default_pipe   = ath10k_snoc_hif_get_default_pipe,
1164     .power_up       = ath10k_snoc_hif_power_up,
1165     .power_down     = ath10k_snoc_hif_power_down,
1166     .tx_sg          = ath10k_snoc_hif_tx_sg,
1167     .send_complete_check    = ath10k_snoc_hif_send_complete_check,
1168     .get_free_queue_number  = ath10k_snoc_hif_get_free_queue_number,
1169     .get_target_info    = ath10k_snoc_hif_get_target_info,
1170     .set_target_log_mode    = ath10k_snoc_hif_set_target_log_mode,
1171 
1172 #ifdef CONFIG_PM
1173     .suspend                = ath10k_snoc_hif_suspend,
1174     .resume                 = ath10k_snoc_hif_resume,
1175 #endif
1176 };
1177 
1178 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
1179     .read32     = ath10k_snoc_read32,
1180     .write32    = ath10k_snoc_write32,
1181 };
1182 
1183 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
1184 {
1185     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1186     int i;
1187 
1188     for (i = 0; i < CE_COUNT_MAX; i++) {
1189         if (ar_snoc->ce_irqs[i].irq_line == irq)
1190             return i;
1191     }
1192     ath10k_err(ar, "No matching CE id for irq %d\n", irq);
1193 
1194     return -EINVAL;
1195 }
1196 
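     /* Per-CE interrupt handler: mask the interrupt, mark the CE as
      * pending and defer the actual processing to the NAPI poll loop.
      */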
1197 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
1198 {
1199     struct ath10k *ar = arg;
1200     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1201     int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
1202 
1203     if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
1204         ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1205                 ce_id);
1206         return IRQ_HANDLED;
1207     }
1208 
1209     ath10k_ce_disable_interrupt(ar, ce_id);
1210     set_bit(ce_id, ar_snoc->pending_ce_irqs);
1211 
1212     napi_schedule(&ar->napi);
1213 
1214     return IRQ_HANDLED;
1215 }
1216 
1217 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
1218 {
1219     struct ath10k *ar = container_of(ctx, struct ath10k, napi);
1220     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1221     int done = 0;
1222     int ce_id;
1223 
1224     if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
1225         napi_complete(ctx);
1226         return done;
1227     }
1228 
1229     for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1230         if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
1231             ath10k_ce_per_engine_service(ar, ce_id);
1232             ath10k_ce_enable_interrupt(ar, ce_id);
1233         }
1234 
1235     done = ath10k_htt_txrx_compl_task(ar, budget);
1236 
1237     if (done < budget)
1238         napi_complete(ctx);
1239 
1240     return done;
1241 }
1242 
1243 static void ath10k_snoc_init_napi(struct ath10k *ar)
1244 {
1245     netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
1246                NAPI_POLL_WEIGHT);
1247 }
1248 
1249 static int ath10k_snoc_request_irq(struct ath10k *ar)
1250 {
1251     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1252     int ret, id;
1253 
1254     for (id = 0; id < CE_COUNT_MAX; id++) {
1255         ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
1256                   ath10k_snoc_per_engine_handler, 0,
1257                   ce_name[id], ar);
1258         if (ret) {
1259             ath10k_err(ar,
1260                    "failed to register IRQ handler for CE %d: %d\n",
1261                    id, ret);
1262             goto err_irq;
1263         }
1264     }
1265 
1266     return 0;
1267 
1268 err_irq:
1269     for (id -= 1; id >= 0; id--)
1270         free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1271 
1272     return ret;
1273 }
1274 
1275 static void ath10k_snoc_free_irq(struct ath10k *ar)
1276 {
1277     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1278     int id;
1279 
1280     for (id = 0; id < CE_COUNT_MAX; id++)
1281         free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
1282 }
1283 
1284 static int ath10k_snoc_resource_init(struct ath10k *ar)
1285 {
1286     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1287     struct platform_device *pdev;
1288     struct resource *res;
1289     int i, ret = 0;
1290 
1291     pdev = ar_snoc->dev;
1292     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
1293     if (!res) {
1294         ath10k_err(ar, "Memory base not found in DT\n");
1295         return -EINVAL;
1296     }
1297 
1298     ar_snoc->mem_pa = res->start;
1299     ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
1300                     resource_size(res));
1301     if (!ar_snoc->mem) {
1302         ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
1303                &ar_snoc->mem_pa);
1304         return -EINVAL;
1305     }
1306 
1307     for (i = 0; i < CE_COUNT; i++) {
1308         ret = platform_get_irq(ar_snoc->dev, i);
1309         if (ret < 0)
1310             return ret;
1311         ar_snoc->ce_irqs[i].irq_line = ret;
1312     }
1313 
1314     ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
1315                        &ar_snoc->xo_cal_data);
1316     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
1317     if (ret == 0) {
1318         ar_snoc->xo_cal_supported = true;
1319         ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
1320                ar_snoc->xo_cal_data);
1321     }
1322 
1323     return 0;
1324 }
1325 
1326 static void ath10k_snoc_quirks_init(struct ath10k *ar)
1327 {
1328     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1329     struct device *dev = &ar_snoc->dev->dev;
1330 
1331     if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
1332         set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
1333 }
1334 
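     /* Called from the QMI layer.  FW_READY registers the core on first
      * boot or starts recovery after a firmware restart; FW_DOWN sets
      * the recovery and crash-flush flags.
      */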
1335 int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
1336 {
1337     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1338     struct ath10k_bus_params bus_params = {};
1339     int ret;
1340 
1341     if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
1342         return 0;
1343 
1344     switch (type) {
1345     case ATH10K_QMI_EVENT_FW_READY_IND:
1346         if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
1347             ath10k_core_start_recovery(ar);
1348             break;
1349         }
1350 
1351         bus_params.dev_type = ATH10K_DEV_TYPE_LL;
1352         bus_params.chip_id = ar_snoc->target_info.soc_version;
1353         ret = ath10k_core_register(ar, &bus_params);
1354         if (ret) {
1355             ath10k_err(ar, "Failed to register driver core: %d\n",
1356                    ret);
1357             return ret;
1358         }
1359         set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
1360         break;
1361     case ATH10K_QMI_EVENT_FW_DOWN_IND:
1362         set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
1363         set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
1364         break;
1365     default:
1366         ath10k_err(ar, "invalid fw indication: %llx\n", type);
1367         return -EINVAL;
1368     }
1369 
1370     return 0;
1371 }
1372 
1373 static int ath10k_snoc_setup_resource(struct ath10k *ar)
1374 {
1375     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1376     struct ath10k_ce *ce = ath10k_ce_priv(ar);
1377     struct ath10k_snoc_pipe *pipe;
1378     int i, ret;
1379 
1380     timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
1381     spin_lock_init(&ce->ce_lock);
1382     for (i = 0; i < CE_COUNT; i++) {
1383         pipe = &ar_snoc->pipe_info[i];
1384         pipe->ce_hdl = &ce->ce_states[i];
1385         pipe->pipe_num = i;
1386         pipe->hif_ce_state = ar;
1387 
1388         ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1389         if (ret) {
1390             ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1391                    i, ret);
1392             return ret;
1393         }
1394 
1395         pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
1396     }
1397     ath10k_snoc_init_napi(ar);
1398 
1399     return 0;
1400 }
1401 
1402 static void ath10k_snoc_release_resource(struct ath10k *ar)
1403 {
1404     int i;
1405 
1406     netif_napi_del(&ar->napi);
1407     for (i = 0; i < CE_COUNT; i++)
1408         ath10k_ce_free_pipe(ar, i);
1409 }
1410 
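     /* Copy the MSA region into the coredump buffer, preceded by an
      * ath10k_dump_ram_data_hdr describing the region.
      */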
1411 static void ath10k_msa_dump_memory(struct ath10k *ar,
1412                    struct ath10k_fw_crash_data *crash_data)
1413 {
1414     const struct ath10k_hw_mem_layout *mem_layout;
1415     const struct ath10k_mem_region *current_region;
1416     struct ath10k_dump_ram_data_hdr *hdr;
1417     size_t buf_len;
1418     u8 *buf;
1419 
1420     if (!crash_data || !crash_data->ramdump_buf)
1421         return;
1422 
1423     mem_layout = ath10k_coredump_get_mem_layout(ar);
1424     if (!mem_layout)
1425         return;
1426 
1427     current_region = &mem_layout->region_table.regions[0];
1428 
1429     buf = crash_data->ramdump_buf;
1430     buf_len = crash_data->ramdump_buf_len;
1431     memset(buf, 0, buf_len);
1432 
1433     /* Reserve space for the header. */
1434     hdr = (void *)buf;
1435     buf += sizeof(*hdr);
1436     buf_len -= sizeof(*hdr);
1437 
1438     hdr->region_type = cpu_to_le32(current_region->type);
1439     hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
1440     hdr->length = cpu_to_le32(ar->msa.mem_size);
1441 
1442     if (current_region->len < ar->msa.mem_size) {
1443         memcpy(buf, ar->msa.vaddr, current_region->len);
1444         ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
1445                 current_region->len, ar->msa.mem_size);
1446     } else {
1447         memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
1448     }
1449 }
1450 
1451 void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
1452 {
1453     struct ath10k_fw_crash_data *crash_data;
1454     char guid[UUID_STRING_LEN + 1];
1455 
1456     mutex_lock(&ar->dump_mutex);
1457 
1458     spin_lock_bh(&ar->data_lock);
1459     ar->stats.fw_crash_counter++;
1460     spin_unlock_bh(&ar->data_lock);
1461 
1462     crash_data = ath10k_coredump_new(ar);
1463 
1464     if (crash_data)
1465         scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1466     else
1467         scnprintf(guid, sizeof(guid), "n/a");
1468 
1469     ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1470     ath10k_print_driver_info(ar);
1471     ath10k_msa_dump_memory(ar, crash_data);
1472     mutex_unlock(&ar->dump_mutex);
1473 }
1474 
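     /* SSR notifier callback for the modem ("mpss") subsystem: sets or
      * clears ATH10K_SNOC_FLAG_MODEM_STOPPED depending on whether the
      * modem is shutting down cleanly or has crashed.
      */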
1475 static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action,
1476                     void *data)
1477 {
1478     struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb);
1479     struct ath10k *ar = ar_snoc->ar;
1480     struct qcom_ssr_notify_data *notify_data = data;
1481 
1482     switch (action) {
1483     case QCOM_SSR_BEFORE_POWERUP:
1484         ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n");
1485         clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
1486         break;
1487 
1488     case QCOM_SSR_AFTER_POWERUP:
1489         ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n");
1490         break;
1491 
1492     case QCOM_SSR_BEFORE_SHUTDOWN:
1493         ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n",
1494                notify_data->crashed ? "crashed" : "stopping");
1495         if (!notify_data->crashed)
1496             set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
1497         else
1498             clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
1499         break;
1500 
1501     case QCOM_SSR_AFTER_SHUTDOWN:
1502         ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n");
1503         break;
1504 
1505     default:
1506         ath10k_err(ar, "received unrecognized event %lu\n", action);
1507         break;
1508     }
1509 
1510     return NOTIFY_OK;
1511 }
1512 
1513 static int ath10k_modem_init(struct ath10k *ar)
1514 {
1515     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1516     void *notifier;
1517     int ret;
1518 
1519     ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify;
1520 
1521     notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb);
1522     if (IS_ERR(notifier)) {
1523         ret = PTR_ERR(notifier);
1524         ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret);
1525         return ret;
1526     }
1527 
1528     ar_snoc->notifier = notifier;
1529 
1530     return 0;
1531 }
1532 
1533 static void ath10k_modem_deinit(struct ath10k *ar)
1534 {
1535     int ret;
1536     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1537 
1538     ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb);
1539     if (ret)
1540         ath10k_err(ar, "error %d unregistering notifier\n", ret);
1541 }
1542 
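     /* Set up the MSA firmware memory region: map the reserved
      * "memory-region" from DT when one is provided, otherwise allocate
      * msa_size bytes of coherent DMA memory.
      */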
1543 static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
1544 {
1545     struct device *dev = ar->dev;
1546     struct device_node *node;
1547     struct resource r;
1548     int ret;
1549 
1550     node = of_parse_phandle(dev->of_node, "memory-region", 0);
1551     if (node) {
1552         ret = of_address_to_resource(node, 0, &r);
1553         of_node_put(node);
1554         if (ret) {
1555             dev_err(dev, "failed to resolve msa fixed region\n");
1556             return ret;
1557         }
1558 
1559         ar->msa.paddr = r.start;
1560         ar->msa.mem_size = resource_size(&r);
1561         ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
1562                           ar->msa.mem_size,
1563                           MEMREMAP_WT);
1564         if (IS_ERR(ar->msa.vaddr)) {
1565             dev_err(dev, "failed to map memory region: %pa\n",
1566                 &r.start);
1567             return PTR_ERR(ar->msa.vaddr);
1568         }
1569     } else {
1570         ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
1571                             &ar->msa.paddr,
1572                             GFP_KERNEL);
1573         if (!ar->msa.vaddr) {
1574             ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1575             return -ENOMEM;
1576         }
1577         ar->msa.mem_size = msa_size;
1578     }
1579 
1580     ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
1581            &ar->msa.paddr,
1582            ar->msa.vaddr);
1583 
1584     return 0;
1585 }
1586 
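     /* When the DT provides a "wifi-firmware" child node, create a
      * platform device for it, attach it to its own IOMMU domain and
      * map the MSA region so the firmware can run without TrustZone;
      * otherwise fall back to the TrustZone path (use_tz).
      */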
1587 static int ath10k_fw_init(struct ath10k *ar)
1588 {
1589     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1590     struct device *host_dev = &ar_snoc->dev->dev;
1591     struct platform_device_info info;
1592     struct iommu_domain *iommu_dom;
1593     struct platform_device *pdev;
1594     struct device_node *node;
1595     int ret;
1596 
1597     node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
1598     if (!node) {
1599         ar_snoc->use_tz = true;
1600         return 0;
1601     }
1602 
1603     memset(&info, 0, sizeof(info));
1604     info.fwnode = &node->fwnode;
1605     info.parent = host_dev;
1606     info.name = node->name;
1607     info.dma_mask = DMA_BIT_MASK(32);
1608 
1609     pdev = platform_device_register_full(&info);
1610     if (IS_ERR(pdev)) {
1611         of_node_put(node);
1612         return PTR_ERR(pdev);
1613     }
1614 
1615     pdev->dev.of_node = node;
1616 
1617     ret = of_dma_configure(&pdev->dev, node, true);
1618     if (ret) {
1619         ath10k_err(ar, "dma configure fail: %d\n", ret);
1620         goto err_unregister;
1621     }
1622 
1623     ar_snoc->fw.dev = &pdev->dev;
1624 
1625     iommu_dom = iommu_domain_alloc(&platform_bus_type);
1626     if (!iommu_dom) {
1627         ath10k_err(ar, "failed to allocate iommu domain\n");
1628         ret = -ENOMEM;
1629         goto err_unregister;
1630     }
1631 
1632     ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
1633     if (ret) {
1634         ath10k_err(ar, "could not attach device: %d\n", ret);
1635         goto err_iommu_free;
1636     }
1637 
1638     ar_snoc->fw.iommu_domain = iommu_dom;
1639     ar_snoc->fw.fw_start_addr = ar->msa.paddr;
1640 
1641     ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
1642             ar->msa.paddr, ar->msa.mem_size,
1643             IOMMU_READ | IOMMU_WRITE);
1644     if (ret) {
1645         ath10k_err(ar, "failed to map firmware region: %d\n", ret);
1646         goto err_iommu_detach;
1647     }
1648 
1649     of_node_put(node);
1650 
1651     return 0;
1652 
1653 err_iommu_detach:
1654     iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
1655 
1656 err_iommu_free:
1657     iommu_domain_free(iommu_dom);
1658 
1659 err_unregister:
1660     platform_device_unregister(pdev);
1661     of_node_put(node);
1662 
1663     return ret;
1664 }
1665 
1666 static int ath10k_fw_deinit(struct ath10k *ar)
1667 {
1668     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1669     const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
1670     struct iommu_domain *iommu;
1671     size_t unmapped_size;
1672 
1673     if (ar_snoc->use_tz)
1674         return 0;
1675 
1676     iommu = ar_snoc->fw.iommu_domain;
1677 
1678     unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
1679                     mapped_size);
1680     if (unmapped_size != mapped_size)
1681         ath10k_err(ar, "failed to unmap firmware: %zu\n",
1682                unmapped_size);
1683 
1684     iommu_detach_device(iommu, ar_snoc->fw.dev);
1685     iommu_domain_free(iommu);
1686 
1687     platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
1688 
1689     return 0;
1690 }
1691 
1692 static const struct of_device_id ath10k_snoc_dt_match[] = {
1693     { .compatible = "qcom,wcn3990-wifi",
1694      .data = &drv_priv,
1695     },
1696     { }
1697 };
1698 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1699 
1700 static int ath10k_snoc_probe(struct platform_device *pdev)
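     /* Probe: set the DMA mask, parse the "membase" resource, per-CE
      * interrupts and optional qcom,xo-cal-data / 8-bit host-cap quirk
      * properties, look up regulators and clocks, reserve the MSA
      * region and register the QMI and modem-SSR clients.  The ath10k
      * core is registered later, when firmware reports FW_READY (see
      * ath10k_snoc_fw_indication()).
      */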
1701 {
1702     const struct ath10k_snoc_drv_priv *drv_data;
1703     struct ath10k_snoc *ar_snoc;
1704     struct device *dev;
1705     struct ath10k *ar;
1706     u32 msa_size;
1707     int ret;
1708     u32 i;
1709 
1710     dev = &pdev->dev;
1711     drv_data = device_get_match_data(dev);
1712     if (!drv_data) {
1713         dev_err(dev, "failed to find matching device tree id\n");
1714         return -EINVAL;
1715     }
1716 
1717     ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1718     if (ret) {
1719         dev_err(dev, "failed to set dma mask: %d\n", ret);
1720         return ret;
1721     }
1722 
1723     ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1724                 drv_data->hw_rev, &ath10k_snoc_hif_ops);
1725     if (!ar) {
1726         dev_err(dev, "failed to allocate core\n");
1727         return -ENOMEM;
1728     }
1729 
1730     ar_snoc = ath10k_snoc_priv(ar);
1731     ar_snoc->dev = pdev;
1732     platform_set_drvdata(pdev, ar);
1733     ar_snoc->ar = ar;
1734     ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1735     ar->ce_priv = &ar_snoc->ce;
1736     msa_size = drv_data->msa_size;
1737 
1738     ath10k_snoc_quirks_init(ar);
1739 
1740     ret = ath10k_snoc_resource_init(ar);
1741     if (ret) {
1742         ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1743         goto err_core_destroy;
1744     }
1745 
1746     ret = ath10k_snoc_setup_resource(ar);
1747     if (ret) {
1748         ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1749         goto err_core_destroy;
1750     }
1751     ret = ath10k_snoc_request_irq(ar);
1752     if (ret) {
1753         ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1754         goto err_release_resource;
1755     }
1756 
1757     ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
1758     ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
1759                       sizeof(*ar_snoc->vregs), GFP_KERNEL);
1760     if (!ar_snoc->vregs) {
1761         ret = -ENOMEM;
1762         goto err_free_irq;
1763     }
1764     for (i = 0; i < ar_snoc->num_vregs; i++)
1765         ar_snoc->vregs[i].supply = ath10k_regulators[i];
1766 
1767     ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
1768                       ar_snoc->vregs);
1769     if (ret < 0)
1770         goto err_free_irq;
1771 
1772     ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
1773     ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
1774                      sizeof(*ar_snoc->clks), GFP_KERNEL);
1775     if (!ar_snoc->clks) {
1776         ret = -ENOMEM;
1777         goto err_free_irq;
1778     }
1779 
1780     for (i = 0; i < ar_snoc->num_clks; i++)
1781         ar_snoc->clks[i].id = ath10k_clocks[i];
1782 
1783     ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
1784                      ar_snoc->clks);
1785     if (ret)
1786         goto err_free_irq;
1787 
1788     ret = ath10k_setup_msa_resources(ar, msa_size);
1789     if (ret) {
1790         ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
1791         goto err_free_irq;
1792     }
1793 
1794     ret = ath10k_fw_init(ar);
1795     if (ret) {
1796         ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
1797         goto err_free_irq;
1798     }
1799 
1800     ret = ath10k_qmi_init(ar, msa_size);
1801     if (ret) {
1802         ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
1803         goto err_fw_deinit;
1804     }
1805 
1806     ret = ath10k_modem_init(ar);
1807     if (ret)
1808         goto err_qmi_deinit;
1809 
1810     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1811 
1812     return 0;
1813 
1814 err_qmi_deinit:
1815     ath10k_qmi_deinit(ar);
1816 
1817 err_fw_deinit:
1818     ath10k_fw_deinit(ar);
1819 
1820 err_free_irq:
1821     ath10k_snoc_free_irq(ar);
1822 
1823 err_release_resource:
1824     ath10k_snoc_release_resource(ar);
1825 
1826 err_core_destroy:
1827     ath10k_core_destroy(ar);
1828 
1829     return ret;
1830 }
1831 
1832 static int ath10k_snoc_free_resources(struct ath10k *ar)
1833 {
1834     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1835 
1836     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n");
1837 
1838     set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
1839 
1840     ath10k_core_unregister(ar);
1841     ath10k_fw_deinit(ar);
1842     ath10k_snoc_free_irq(ar);
1843     ath10k_snoc_release_resource(ar);
1844     ath10k_modem_deinit(ar);
1845     ath10k_qmi_deinit(ar);
1846     ath10k_core_destroy(ar);
1847 
1848     return 0;
1849 }
1850 
1851 static int ath10k_snoc_remove(struct platform_device *pdev)
1852 {
1853     struct ath10k *ar = platform_get_drvdata(pdev);
1854     struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1855 
1856     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1857 
1858     reinit_completion(&ar->driver_recovery);
1859 
1860     if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
1861         wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
1862 
1863     ath10k_snoc_free_resources(ar);
1864 
1865     return 0;
1866 }
1867 
1868 static void ath10k_snoc_shutdown(struct platform_device *pdev)
1869 {
1870     struct ath10k *ar = platform_get_drvdata(pdev);
1871 
1872     ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
1873     ath10k_snoc_free_resources(ar);
1874 }
1875 
1876 static struct platform_driver ath10k_snoc_driver = {
1877     .probe  = ath10k_snoc_probe,
1878     .remove = ath10k_snoc_remove,
1879     .shutdown = ath10k_snoc_shutdown,
1880     .driver = {
1881         .name   = "ath10k_snoc",
1882         .of_match_table = ath10k_snoc_dt_match,
1883     },
1884 };
1885 module_platform_driver(ath10k_snoc_driver);
1886 
1887 MODULE_AUTHOR("Qualcomm");
1888 MODULE_LICENSE("Dual BSD/GPL");
1889 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");