0001 // SPDX-License-Identifier: ISC
0002 /*
0003  * Copyright (c) 2005-2011 Atheros Communications Inc.
0004  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
0005  */
0006 
0007 #include <linux/pci.h>
0008 #include <linux/module.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/spinlock.h>
0011 #include <linux/bitops.h>
0012 
0013 #include "core.h"
0014 #include "debug.h"
0015 #include "coredump.h"
0016 
0017 #include "targaddrs.h"
0018 #include "bmi.h"
0019 
0020 #include "hif.h"
0021 #include "htc.h"
0022 
0023 #include "ce.h"
0024 #include "pci.h"
0025 
0026 enum ath10k_pci_reset_mode {
0027     ATH10K_PCI_RESET_AUTO = 0,
0028     ATH10K_PCI_RESET_WARM_ONLY = 1,
0029 };
0030 
0031 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
0032 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
0033 
0034 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
0035 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
0036 
0037 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
0038 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
0039 
0040 /* how long to wait for the target to initialise, in ms */
0041 #define ATH10K_PCI_TARGET_WAIT 3000
0042 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
0043 
0044 /* Maximum number of bytes that can be handled atomically by
0045  * diag read and write.
0046  */
0047 #define ATH10K_DIAG_TRANSFER_LIMIT  0x5000
0048 
0049 #define QCA99X0_PCIE_BAR0_START_REG    0x81030
0050 #define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
0051 #define QCA99X0_CPU_MEM_DATA_REG       0x4d010
0052 
0053 static const struct pci_device_id ath10k_pci_id_table[] = {
0054     /* PCI-E QCA988X V2 (Ubiquiti branded) */
0055     { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
0056 
0057     { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
0058     { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
0059     { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
0060     { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
0061     { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
0062     { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
0063     { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
0064     { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
0065     {0}
0066 };
0067 
0068 static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
0069     /* QCA988X pre 2.0 chips are not supported because they need some nasty
0070      * hacks. ath10k doesn't have them and these devices crash horribly
0071      * because of that.
0072      */
0073     { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
0074     { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
0075 
0076     { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
0077     { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
0078     { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
0079     { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
0080     { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
0081 
0082     { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
0083     { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
0084     { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
0085     { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
0086     { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
0087 
0088     { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
0089 
0090     { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
0091 
0092     { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
0093 
0094     { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
0095     { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
0096 
0097     { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
0098 };
0099 
0100 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
0101 static int ath10k_pci_cold_reset(struct ath10k *ar);
0102 static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
0103 static int ath10k_pci_init_irq(struct ath10k *ar);
0104 static int ath10k_pci_deinit_irq(struct ath10k *ar);
0105 static int ath10k_pci_request_irq(struct ath10k *ar);
0106 static void ath10k_pci_free_irq(struct ath10k *ar);
0107 static int ath10k_pci_bmi_wait(struct ath10k *ar,
0108                    struct ath10k_ce_pipe *tx_pipe,
0109                    struct ath10k_ce_pipe *rx_pipe,
0110                    struct bmi_xfer *xfer);
0111 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
0112 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
0113 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
0114 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
0115 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
0116 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
0117 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
0118 
0119 static const struct ce_attr pci_host_ce_config_wlan[] = {
0120     /* CE0: host->target HTC control and raw streams */
0121     {
0122         .flags = CE_ATTR_FLAGS,
0123         .src_nentries = 16,
0124         .src_sz_max = 256,
0125         .dest_nentries = 0,
0126         .send_cb = ath10k_pci_htc_tx_cb,
0127     },
0128 
0129     /* CE1: target->host HTT + HTC control */
0130     {
0131         .flags = CE_ATTR_FLAGS,
0132         .src_nentries = 0,
0133         .src_sz_max = 2048,
0134         .dest_nentries = 512,
0135         .recv_cb = ath10k_pci_htt_htc_rx_cb,
0136     },
0137 
0138     /* CE2: target->host WMI */
0139     {
0140         .flags = CE_ATTR_FLAGS,
0141         .src_nentries = 0,
0142         .src_sz_max = 2048,
0143         .dest_nentries = 128,
0144         .recv_cb = ath10k_pci_htc_rx_cb,
0145     },
0146 
0147     /* CE3: host->target WMI */
0148     {
0149         .flags = CE_ATTR_FLAGS,
0150         .src_nentries = 32,
0151         .src_sz_max = 2048,
0152         .dest_nentries = 0,
0153         .send_cb = ath10k_pci_htc_tx_cb,
0154     },
0155 
0156     /* CE4: host->target HTT */
0157     {
0158         .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
0159         .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
0160         .src_sz_max = 256,
0161         .dest_nentries = 0,
0162         .send_cb = ath10k_pci_htt_tx_cb,
0163     },
0164 
0165     /* CE5: target->host HTT (HIF->HTT) */
0166     {
0167         .flags = CE_ATTR_FLAGS,
0168         .src_nentries = 0,
0169         .src_sz_max = 512,
0170         .dest_nentries = 512,
0171         .recv_cb = ath10k_pci_htt_rx_cb,
0172     },
0173 
0174     /* CE6: target autonomous hif_memcpy */
0175     {
0176         .flags = CE_ATTR_FLAGS,
0177         .src_nentries = 0,
0178         .src_sz_max = 0,
0179         .dest_nentries = 0,
0180     },
0181 
0182     /* CE7: ce_diag, the Diagnostic Window */
0183     {
0184         .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
0185         .src_nentries = 2,
0186         .src_sz_max = DIAG_TRANSFER_LIMIT,
0187         .dest_nentries = 2,
0188     },
0189 
0190     /* CE8: target->host pktlog */
0191     {
0192         .flags = CE_ATTR_FLAGS,
0193         .src_nentries = 0,
0194         .src_sz_max = 2048,
0195         .dest_nentries = 128,
0196         .recv_cb = ath10k_pci_pktlog_rx_cb,
0197     },
0198 
0199     /* CE9: target autonomous qcache memcpy */
0200     {
0201         .flags = CE_ATTR_FLAGS,
0202         .src_nentries = 0,
0203         .src_sz_max = 0,
0204         .dest_nentries = 0,
0205     },
0206 
0207     /* CE10: target autonomous hif memcpy */
0208     {
0209         .flags = CE_ATTR_FLAGS,
0210         .src_nentries = 0,
0211         .src_sz_max = 0,
0212         .dest_nentries = 0,
0213     },
0214 
0215     /* CE11: target autonomous hif memcpy */
0216     {
0217         .flags = CE_ATTR_FLAGS,
0218         .src_nentries = 0,
0219         .src_sz_max = 0,
0220         .dest_nentries = 0,
0221     },
0222 };
0223 
0224 /* Target firmware's Copy Engine configuration. */
0225 static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
0226     /* CE0: host->target HTC control and raw streams */
0227     {
0228         .pipenum = __cpu_to_le32(0),
0229         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0230         .nentries = __cpu_to_le32(32),
0231         .nbytes_max = __cpu_to_le32(256),
0232         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0233         .reserved = __cpu_to_le32(0),
0234     },
0235 
0236     /* CE1: target->host HTT + HTC control */
0237     {
0238         .pipenum = __cpu_to_le32(1),
0239         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0240         .nentries = __cpu_to_le32(32),
0241         .nbytes_max = __cpu_to_le32(2048),
0242         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0243         .reserved = __cpu_to_le32(0),
0244     },
0245 
0246     /* CE2: target->host WMI */
0247     {
0248         .pipenum = __cpu_to_le32(2),
0249         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0250         .nentries = __cpu_to_le32(64),
0251         .nbytes_max = __cpu_to_le32(2048),
0252         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0253         .reserved = __cpu_to_le32(0),
0254     },
0255 
0256     /* CE3: host->target WMI */
0257     {
0258         .pipenum = __cpu_to_le32(3),
0259         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0260         .nentries = __cpu_to_le32(32),
0261         .nbytes_max = __cpu_to_le32(2048),
0262         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0263         .reserved = __cpu_to_le32(0),
0264     },
0265 
0266     /* CE4: host->target HTT */
0267     {
0268         .pipenum = __cpu_to_le32(4),
0269         .pipedir = __cpu_to_le32(PIPEDIR_OUT),
0270         .nentries = __cpu_to_le32(256),
0271         .nbytes_max = __cpu_to_le32(256),
0272         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0273         .reserved = __cpu_to_le32(0),
0274     },
0275 
0276     /* NB: 50% of src nentries, since tx has 2 frags */
0277 
0278     /* CE5: target->host HTT (HIF->HTT) */
0279     {
0280         .pipenum = __cpu_to_le32(5),
0281         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0282         .nentries = __cpu_to_le32(32),
0283         .nbytes_max = __cpu_to_le32(512),
0284         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0285         .reserved = __cpu_to_le32(0),
0286     },
0287 
0288     /* CE6: Reserved for target autonomous hif_memcpy */
0289     {
0290         .pipenum = __cpu_to_le32(6),
0291         .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
0292         .nentries = __cpu_to_le32(32),
0293         .nbytes_max = __cpu_to_le32(4096),
0294         .flags = __cpu_to_le32(CE_ATTR_FLAGS),
0295         .reserved = __cpu_to_le32(0),
0296     },
0297 
0298     /* CE7: used only by Host */
0299     {
0300         .pipenum = __cpu_to_le32(7),
0301         .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
0302         .nentries = __cpu_to_le32(0),
0303         .nbytes_max = __cpu_to_le32(0),
0304         .flags = __cpu_to_le32(0),
0305         .reserved = __cpu_to_le32(0),
0306     },
0307 
0308     /* CE8: target->host pktlog */
0309     {
0310         .pipenum = __cpu_to_le32(8),
0311         .pipedir = __cpu_to_le32(PIPEDIR_IN),
0312         .nentries = __cpu_to_le32(64),
0313         .nbytes_max = __cpu_to_le32(2048),
0314         .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
0315         .reserved = __cpu_to_le32(0),
0316     },
0317 
0318     /* CE9: target autonomous qcache memcpy */
0319     {
0320         .pipenum = __cpu_to_le32(9),
0321         .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
0322         .nentries = __cpu_to_le32(32),
0323         .nbytes_max = __cpu_to_le32(2048),
0324         .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
0325         .reserved = __cpu_to_le32(0),
0326     },
0327 
0328     /* It is not necessary to send the target wlan configuration for CE10 & CE11
0329      * as these CEs are not actively used by the target.
0330      */
0331 };
0332 
0333 /*
0334  * Map from service/endpoint to Copy Engine.
0335  * This table is derived from the CE_PCI TABLE, above.
0336  * It is passed to the Target at startup for use by firmware.
0337  */
0338 static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
0339     {
0340         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
0341         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0342         __cpu_to_le32(3),
0343     },
0344     {
0345         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
0346         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0347         __cpu_to_le32(2),
0348     },
0349     {
0350         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
0351         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0352         __cpu_to_le32(3),
0353     },
0354     {
0355         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
0356         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0357         __cpu_to_le32(2),
0358     },
0359     {
0360         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
0361         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0362         __cpu_to_le32(3),
0363     },
0364     {
0365         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
0366         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0367         __cpu_to_le32(2),
0368     },
0369     {
0370         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
0371         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0372         __cpu_to_le32(3),
0373     },
0374     {
0375         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
0376         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0377         __cpu_to_le32(2),
0378     },
0379     {
0380         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
0381         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0382         __cpu_to_le32(3),
0383     },
0384     {
0385         __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
0386         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0387         __cpu_to_le32(2),
0388     },
0389     {
0390         __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
0391         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0392         __cpu_to_le32(0),
0393     },
0394     {
0395         __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
0396         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0397         __cpu_to_le32(1),
0398     },
0399     { /* not used */
0400         __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
0401         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0402         __cpu_to_le32(0),
0403     },
0404     { /* not used */
0405         __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
0406         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0407         __cpu_to_le32(1),
0408     },
0409     {
0410         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
0411         __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
0412         __cpu_to_le32(4),
0413     },
0414     {
0415         __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
0416         __cpu_to_le32(PIPEDIR_IN),  /* in = DL = target -> host */
0417         __cpu_to_le32(5),
0418     },
0419 
0420     /* (Additions here) */
0421 
0422     { /* must be last */
0423         __cpu_to_le32(0),
0424         __cpu_to_le32(0),
0425         __cpu_to_le32(0),
0426     },
0427 };
0428 
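/* Check the RTC state register to see whether the SoC reports that it is
 * fully awake (RTC_STATE_V_ON).
 */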
0429 static bool ath10k_pci_is_awake(struct ath10k *ar)
0430 {
0431     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0432     u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
0433                RTC_STATE_ADDRESS);
0434 
0435     return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
0436 }
0437 
0438 static void __ath10k_pci_wake(struct ath10k *ar)
0439 {
0440     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0441 
0442     lockdep_assert_held(&ar_pci->ps_lock);
0443 
0444     ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
0445            ar_pci->ps_wake_refcount, ar_pci->ps_awake);
0446 
0447     iowrite32(PCIE_SOC_WAKE_V_MASK,
0448           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
0449           PCIE_SOC_WAKE_ADDRESS);
0450 }
0451 
0452 static void __ath10k_pci_sleep(struct ath10k *ar)
0453 {
0454     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0455 
0456     lockdep_assert_held(&ar_pci->ps_lock);
0457 
0458     ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
0459            ar_pci->ps_wake_refcount, ar_pci->ps_awake);
0460 
0461     iowrite32(PCIE_SOC_WAKE_RESET,
0462           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
0463           PCIE_SOC_WAKE_ADDRESS);
0464     ar_pci->ps_awake = false;
0465 }
0466 
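/* Poll the awake state with an increasing delay (5 us steps, capped at
 * 50 us) until the device wakes up or PCIE_WAKE_TIMEOUT expires.
 */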
0467 static int ath10k_pci_wake_wait(struct ath10k *ar)
0468 {
0469     int tot_delay = 0;
0470     int curr_delay = 5;
0471 
0472     while (tot_delay < PCIE_WAKE_TIMEOUT) {
0473         if (ath10k_pci_is_awake(ar)) {
0474             if (tot_delay > PCIE_WAKE_LATE_US)
0475                 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
0476                         tot_delay / 1000);
0477             return 0;
0478         }
0479 
0480         udelay(curr_delay);
0481         tot_delay += curr_delay;
0482 
0483         if (curr_delay < 50)
0484             curr_delay += 5;
0485     }
0486 
0487     return -ETIMEDOUT;
0488 }
0489 
0490 static int ath10k_pci_force_wake(struct ath10k *ar)
0491 {
0492     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0493     unsigned long flags;
0494     int ret = 0;
0495 
0496     if (ar_pci->pci_ps)
0497         return ret;
0498 
0499     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0500 
0501     if (!ar_pci->ps_awake) {
0502         iowrite32(PCIE_SOC_WAKE_V_MASK,
0503               ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
0504               PCIE_SOC_WAKE_ADDRESS);
0505 
0506         ret = ath10k_pci_wake_wait(ar);
0507         if (ret == 0)
0508             ar_pci->ps_awake = true;
0509     }
0510 
0511     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0512 
0513     return ret;
0514 }
0515 
0516 static void ath10k_pci_force_sleep(struct ath10k *ar)
0517 {
0518     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0519     unsigned long flags;
0520 
0521     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0522 
0523     iowrite32(PCIE_SOC_WAKE_RESET,
0524           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
0525           PCIE_SOC_WAKE_ADDRESS);
0526     ar_pci->ps_awake = false;
0527 
0528     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0529 }
0530 
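/* Take a power-save wake reference. Only the first caller performs the
 * actual MMIO wake sequence; later callers just increment the refcount.
 */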
0531 static int ath10k_pci_wake(struct ath10k *ar)
0532 {
0533     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0534     unsigned long flags;
0535     int ret = 0;
0536 
0537     if (ar_pci->pci_ps == 0)
0538         return ret;
0539 
0540     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0541 
0542     ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
0543            ar_pci->ps_wake_refcount, ar_pci->ps_awake);
0544 
0545     /* This function can be called very frequently. To avoid excessive
0546      * CPU stalls on MMIO reads, use a cached variable to hold the device state.
0547      */
0548     if (!ar_pci->ps_awake) {
0549         __ath10k_pci_wake(ar);
0550 
0551         ret = ath10k_pci_wake_wait(ar);
0552         if (ret == 0)
0553             ar_pci->ps_awake = true;
0554     }
0555 
0556     if (ret == 0) {
0557         ar_pci->ps_wake_refcount++;
0558         WARN_ON(ar_pci->ps_wake_refcount == 0);
0559     }
0560 
0561     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0562 
0563     return ret;
0564 }
0565 
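/* Drop a power-save wake reference. The device is not put to sleep
 * immediately; the ps_timer lets it sleep after a grace period once the
 * refcount has dropped to zero.
 */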
0566 static void ath10k_pci_sleep(struct ath10k *ar)
0567 {
0568     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0569     unsigned long flags;
0570 
0571     if (ar_pci->pci_ps == 0)
0572         return;
0573 
0574     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0575 
0576     ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
0577            ar_pci->ps_wake_refcount, ar_pci->ps_awake);
0578 
0579     if (WARN_ON(ar_pci->ps_wake_refcount == 0))
0580         goto skip;
0581 
0582     ar_pci->ps_wake_refcount--;
0583 
0584     mod_timer(&ar_pci->ps_timer, jiffies +
0585           msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
0586 
0587 skip:
0588     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0589 }
0590 
0591 static void ath10k_pci_ps_timer(struct timer_list *t)
0592 {
0593     struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
0594     struct ath10k *ar = ar_pci->ar;
0595     unsigned long flags;
0596 
0597     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0598 
0599     ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
0600            ar_pci->ps_wake_refcount, ar_pci->ps_awake);
0601 
0602     if (ar_pci->ps_wake_refcount > 0)
0603         goto skip;
0604 
0605     __ath10k_pci_sleep(ar);
0606 
0607 skip:
0608     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0609 }
0610 
0611 static void ath10k_pci_sleep_sync(struct ath10k *ar)
0612 {
0613     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0614     unsigned long flags;
0615 
0616     if (ar_pci->pci_ps == 0) {
0617         ath10k_pci_force_sleep(ar);
0618         return;
0619     }
0620 
0621     del_timer_sync(&ar_pci->ps_timer);
0622 
0623     spin_lock_irqsave(&ar_pci->ps_lock, flags);
0624     WARN_ON(ar_pci->ps_wake_refcount > 0);
0625     __ath10k_pci_sleep(ar);
0626     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
0627 }
0628 
0629 static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
0630 {
0631     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0632     int ret;
0633 
0634     if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
0635         ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
0636                 offset, offset + sizeof(value), ar_pci->mem_len);
0637         return;
0638     }
0639 
0640     ret = ath10k_pci_wake(ar);
0641     if (ret) {
0642         ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
0643                 value, offset, ret);
0644         return;
0645     }
0646 
0647     iowrite32(value, ar_pci->mem + offset);
0648     ath10k_pci_sleep(ar);
0649 }
0650 
0651 static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
0652 {
0653     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0654     u32 val;
0655     int ret;
0656 
0657     if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
0658         ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
0659                 offset, offset + sizeof(val), ar_pci->mem_len);
0660         return 0;
0661     }
0662 
0663     ret = ath10k_pci_wake(ar);
0664     if (ret) {
0665         ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
0666                 offset, ret);
0667         return 0xffffffff;
0668     }
0669 
0670     val = ioread32(ar_pci->mem + offset);
0671     ath10k_pci_sleep(ar);
0672 
0673     return val;
0674 }
0675 
0676 inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
0677 {
0678     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0679 
0680     ce->bus_ops->write32(ar, offset, value);
0681 }
0682 
0683 inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
0684 {
0685     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0686 
0687     return ce->bus_ops->read32(ar, offset);
0688 }
0689 
0690 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
0691 {
0692     return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
0693 }
0694 
0695 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
0696 {
0697     ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
0698 }
0699 
0700 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
0701 {
0702     return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
0703 }
0704 
0705 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
0706 {
0707     ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
0708 }
0709 
0710 bool ath10k_pci_irq_pending(struct ath10k *ar)
0711 {
0712     u32 cause;
0713 
0714     /* Check if the shared legacy irq is for us */
0715     cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
0716                   PCIE_INTR_CAUSE_ADDRESS);
0717     if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
0718         return true;
0719 
0720     return false;
0721 }
0722 
0723 void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
0724 {
0725     /* IMPORTANT: INTR_CLR register has to be set after
0726      * INTR_ENABLE is set to 0, otherwise the interrupt cannot
0727      * really be cleared.
0728      */
0729     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0730                0);
0731     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
0732                PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
0733 
0734     /* IMPORTANT: this extra read transaction is required to
0735      * flush the posted write buffer.
0736      */
0737     (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
0738                 PCIE_INTR_ENABLE_ADDRESS);
0739 }
0740 
0741 void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
0742 {
0743     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
0744                PCIE_INTR_ENABLE_ADDRESS,
0745                PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
0746 
0747     /* IMPORTANT: this extra read transaction is required to
0748      * flush the posted write buffer.
0749      */
0750     (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
0751                 PCIE_INTR_ENABLE_ADDRESS);
0752 }
0753 
0754 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
0755 {
0756     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0757 
0758     if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
0759         return "msi";
0760 
0761     return "legacy";
0762 }
0763 
0764 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
0765 {
0766     struct ath10k *ar = pipe->hif_ce_state;
0767     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0768     struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
0769     struct sk_buff *skb;
0770     dma_addr_t paddr;
0771     int ret;
0772 
0773     skb = dev_alloc_skb(pipe->buf_sz);
0774     if (!skb)
0775         return -ENOMEM;
0776 
0777     WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
0778 
0779     paddr = dma_map_single(ar->dev, skb->data,
0780                    skb->len + skb_tailroom(skb),
0781                    DMA_FROM_DEVICE);
0782     if (unlikely(dma_mapping_error(ar->dev, paddr))) {
0783         ath10k_warn(ar, "failed to dma map pci rx buf\n");
0784         dev_kfree_skb_any(skb);
0785         return -EIO;
0786     }
0787 
0788     ATH10K_SKB_RXCB(skb)->paddr = paddr;
0789 
0790     spin_lock_bh(&ce->ce_lock);
0791     ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
0792     spin_unlock_bh(&ce->ce_lock);
0793     if (ret) {
0794         dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
0795                  DMA_FROM_DEVICE);
0796         dev_kfree_skb_any(skb);
0797         return ret;
0798     }
0799 
0800     return 0;
0801 }
0802 
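/* Top up the RX ring of a pipe with freshly allocated buffers. If posting
 * fails (other than the ring being full) the rx_post_retry timer is armed
 * to try again later.
 */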
0803 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
0804 {
0805     struct ath10k *ar = pipe->hif_ce_state;
0806     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0807     struct ath10k_ce *ce = ath10k_ce_priv(ar);
0808     struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
0809     int ret, num;
0810 
0811     if (pipe->buf_sz == 0)
0812         return;
0813 
0814     if (!ce_pipe->dest_ring)
0815         return;
0816 
0817     spin_lock_bh(&ce->ce_lock);
0818     num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
0819     spin_unlock_bh(&ce->ce_lock);
0820 
0821     while (num >= 0) {
0822         ret = __ath10k_pci_rx_post_buf(pipe);
0823         if (ret) {
0824             if (ret == -ENOSPC)
0825                 break;
0826             ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
0827             mod_timer(&ar_pci->rx_post_retry, jiffies +
0828                   ATH10K_PCI_RX_POST_RETRY_MS);
0829             break;
0830         }
0831         num--;
0832     }
0833 }
0834 
0835 void ath10k_pci_rx_post(struct ath10k *ar)
0836 {
0837     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0838     int i;
0839 
0840     for (i = 0; i < CE_COUNT; i++)
0841         ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
0842 }
0843 
0844 void ath10k_pci_rx_replenish_retry(struct timer_list *t)
0845 {
0846     struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
0847     struct ath10k *ar = ar_pci->ar;
0848 
0849     ath10k_pci_rx_post(ar);
0850 }
0851 
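/* Convert a QCA988X target CPU virtual address into an address that can
 * be used with the diagnostic Copy Engine window.
 */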
0852 static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0853 {
0854     u32 val = 0, region = addr & 0xfffff;
0855 
0856     val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
0857                  & 0x7ff) << 21;
0858     val |= 0x100000 | region;
0859     return val;
0860 }
0861 
0862 /* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
0863  * Supports access to target space below 1M for qca6174 and qca9377.
0864  * If the target address is below 1M, bit[20] of the converted CE address
0865  * is 0; otherwise bit[20] of the converted CE address is 1.
0866  */
0867 static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0868 {
0869     u32 val = 0, region = addr & 0xfffff;
0870 
0871     val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
0872                  & 0x7ff) << 21;
0873     val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
0874     return val;
0875 }
0876 
0877 static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0878 {
0879     u32 val = 0, region = addr & 0xfffff;
0880 
0881     val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
0882     val |= 0x100000 | region;
0883     return val;
0884 }
0885 
0886 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0887 {
0888     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0889 
0890     if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
0891         return -ENOTSUPP;
0892 
0893     return ar_pci->targ_cpu_to_ce_addr(ar, addr);
0894 }
0895 
0896 /*
0897  * Diagnostic read/write access is provided for startup/config/debug usage.
0898  * Caller must guarantee proper alignment, when applicable, and that there
0899  * is only a single user at any moment.
0900  */
0901 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
0902                     int nbytes)
0903 {
0904     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
0905     int ret = 0;
0906     u32 *buf;
0907     unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
0908     struct ath10k_ce_pipe *ce_diag;
0909     /* Host buffer address in CE space */
0910     u32 ce_data;
0911     dma_addr_t ce_data_base = 0;
0912     void *data_buf;
0913     int i;
0914 
0915     mutex_lock(&ar_pci->ce_diag_mutex);
0916     ce_diag = ar_pci->ce_diag;
0917 
0918     /*
0919      * Allocate a temporary bounce buffer to hold caller's data
0920      * to be DMA'ed from Target. This guarantees
0921      *   1) 4-byte alignment
0922      *   2) Buffer in DMA-able space
0923      */
0924     alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
0925 
0926     data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
0927                       GFP_ATOMIC);
0928     if (!data_buf) {
0929         ret = -ENOMEM;
0930         goto done;
0931     }
0932 
0933     /* The address supplied by the caller is in the
0934      * Target CPU virtual address space.
0935      *
0936      * In order to use this address with the diagnostic CE,
0937      * convert it from Target CPU virtual address space
0938      * to CE address space
0939      */
0940     address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
0941 
0942     remaining_bytes = nbytes;
0943     ce_data = ce_data_base;
0944     while (remaining_bytes) {
0945         nbytes = min_t(unsigned int, remaining_bytes,
0946                    DIAG_TRANSFER_LIMIT);
0947 
0948         ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
0949         if (ret != 0)
0950             goto done;
0951 
0952         /* Request CE to send from Target(!) address to Host buffer */
0953         ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
0954         if (ret)
0955             goto done;
0956 
0957         i = 0;
0958         while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
0959             udelay(DIAG_ACCESS_CE_WAIT_US);
0960             i += DIAG_ACCESS_CE_WAIT_US;
0961 
0962             if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
0963                 ret = -EBUSY;
0964                 goto done;
0965             }
0966         }
0967 
0968         i = 0;
0969         while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
0970                              &completed_nbytes) != 0) {
0971             udelay(DIAG_ACCESS_CE_WAIT_US);
0972             i += DIAG_ACCESS_CE_WAIT_US;
0973 
0974             if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
0975                 ret = -EBUSY;
0976                 goto done;
0977             }
0978         }
0979 
0980         if (nbytes != completed_nbytes) {
0981             ret = -EIO;
0982             goto done;
0983         }
0984 
0985         if (*buf != ce_data) {
0986             ret = -EIO;
0987             goto done;
0988         }
0989 
0990         remaining_bytes -= nbytes;
0991         memcpy(data, data_buf, nbytes);
0992 
0993         address += nbytes;
0994         data += nbytes;
0995     }
0996 
0997 done:
0998 
0999     if (data_buf)
1000         dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1001                   ce_data_base);
1002 
1003     mutex_unlock(&ar_pci->ce_diag_mutex);
1004 
1005     return ret;
1006 }
1007 
1008 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
1009 {
1010     __le32 val = 0;
1011     int ret;
1012 
1013     ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
1014     *value = __le32_to_cpu(val);
1015 
1016     return ret;
1017 }
1018 
1019 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1020                      u32 src, u32 len)
1021 {
1022     u32 host_addr, addr;
1023     int ret;
1024 
1025     host_addr = host_interest_item_address(src);
1026 
1027     ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1028     if (ret != 0) {
1029         ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1030                 src, ret);
1031         return ret;
1032     }
1033 
1034     ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1035     if (ret != 0) {
1036         ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1037                 addr, len, ret);
1038         return ret;
1039     }
1040 
1041     return 0;
1042 }
1043 
1044 #define ath10k_pci_diag_read_hi(ar, dest, src, len)     \
1045     __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
1046 
1047 int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1048                   const void *data, int nbytes)
1049 {
1050     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1051     int ret = 0;
1052     u32 *buf;
1053     unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
1054     struct ath10k_ce_pipe *ce_diag;
1055     void *data_buf;
1056     dma_addr_t ce_data_base = 0;
1057     int i;
1058 
1059     mutex_lock(&ar_pci->ce_diag_mutex);
1060     ce_diag = ar_pci->ce_diag;
1061 
1062     /*
1063      * Allocate a temporary bounce buffer to hold caller's data
1064      * to be DMA'ed to Target. This guarantees
1065      *   1) 4-byte alignment
1066      *   2) Buffer in DMA-able space
1067      */
1068     alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
1069 
1070     data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
1071                       GFP_ATOMIC);
1072     if (!data_buf) {
1073         ret = -ENOMEM;
1074         goto done;
1075     }
1076 
1077     /*
1078      * The address supplied by the caller is in the
1079      * Target CPU virtual address space.
1080      *
1081      * In order to use this address with the diagnostic CE,
1082      * convert it from
1083      *    Target CPU virtual address space
1084      * to
1085      *    CE address space
1086      */
1087     address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1088 
1089     remaining_bytes = nbytes;
1090     while (remaining_bytes) {
1091         /* FIXME: check cast */
1092         nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1093 
1094         /* Copy caller's data to allocated DMA buf */
1095         memcpy(data_buf, data, nbytes);
1096 
1097         /* Set up to receive directly into Target(!) address */
1098         ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
1099         if (ret != 0)
1100             goto done;
1101 
1102         /*
1103          * Request CE to send caller-supplied data that
1104          * was copied to bounce buffer to Target(!) address.
1105          */
1106         ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
1107         if (ret != 0)
1108             goto done;
1109 
1110         i = 0;
1111         while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
1112             udelay(DIAG_ACCESS_CE_WAIT_US);
1113             i += DIAG_ACCESS_CE_WAIT_US;
1114 
1115             if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1116                 ret = -EBUSY;
1117                 goto done;
1118             }
1119         }
1120 
1121         i = 0;
1122         while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
1123                              &completed_nbytes) != 0) {
1124             udelay(DIAG_ACCESS_CE_WAIT_US);
1125             i += DIAG_ACCESS_CE_WAIT_US;
1126 
1127             if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
1128                 ret = -EBUSY;
1129                 goto done;
1130             }
1131         }
1132 
1133         if (nbytes != completed_nbytes) {
1134             ret = -EIO;
1135             goto done;
1136         }
1137 
1138         if (*buf != address) {
1139             ret = -EIO;
1140             goto done;
1141         }
1142 
1143         remaining_bytes -= nbytes;
1144         address += nbytes;
1145         data += nbytes;
1146     }
1147 
1148 done:
1149     if (data_buf) {
1150         dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
1151                   ce_data_base);
1152     }
1153 
1154     if (ret != 0)
1155         ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1156                 address, ret);
1157 
1158     mutex_unlock(&ar_pci->ce_diag_mutex);
1159 
1160     return ret;
1161 }
1162 
1163 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1164 {
1165     __le32 val = __cpu_to_le32(value);
1166 
1167     return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1168 }
1169 
1170 /* Called by lower (CE) layer when a send to Target completes. */
1171 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1172 {
1173     struct ath10k *ar = ce_state->ar;
1174     struct sk_buff_head list;
1175     struct sk_buff *skb;
1176 
1177     __skb_queue_head_init(&list);
1178     while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1179         /* no need to call tx completion for NULL pointers */
1180         if (skb == NULL)
1181             continue;
1182 
1183         __skb_queue_tail(&list, skb);
1184     }
1185 
1186     while ((skb = __skb_dequeue(&list)))
1187         ath10k_htc_tx_completion_handler(ar, skb);
1188 }
1189 
1190 static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1191                      void (*callback)(struct ath10k *ar,
1192                               struct sk_buff *skb))
1193 {
1194     struct ath10k *ar = ce_state->ar;
1195     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1196     struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1197     struct sk_buff *skb;
1198     struct sk_buff_head list;
1199     void *transfer_context;
1200     unsigned int nbytes, max_nbytes;
1201 
1202     __skb_queue_head_init(&list);
1203     while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1204                          &nbytes) == 0) {
1205         skb = transfer_context;
1206         max_nbytes = skb->len + skb_tailroom(skb);
1207         dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1208                  max_nbytes, DMA_FROM_DEVICE);
1209 
1210         if (unlikely(max_nbytes < nbytes)) {
1211             ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1212                     nbytes, max_nbytes);
1213             dev_kfree_skb_any(skb);
1214             continue;
1215         }
1216 
1217         skb_put(skb, nbytes);
1218         __skb_queue_tail(&list, skb);
1219     }
1220 
1221     while ((skb = __skb_dequeue(&list))) {
1222         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1223                ce_state->id, skb->len);
1224         ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1225                 skb->data, skb->len);
1226 
1227         callback(ar, skb);
1228     }
1229 
1230     ath10k_pci_rx_post_pipe(pipe_info);
1231 }
1232 
1233 static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1234                      void (*callback)(struct ath10k *ar,
1235                               struct sk_buff *skb))
1236 {
1237     struct ath10k *ar = ce_state->ar;
1238     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1239     struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1240     struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1241     struct sk_buff *skb;
1242     struct sk_buff_head list;
1243     void *transfer_context;
1244     unsigned int nbytes, max_nbytes, nentries;
1245     int orig_len;
1246 
1247     /* No need to acquire ce_lock for CE5, since this is the only place CE5
1248      * is processed other than init and deinit. Before releasing CE5
1249      * buffers, interrupts are disabled. Thus CE5 access is serialized.
1250      */
1251     __skb_queue_head_init(&list);
1252     while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1253                             &nbytes) == 0) {
1254         skb = transfer_context;
1255         max_nbytes = skb->len + skb_tailroom(skb);
1256 
1257         if (unlikely(max_nbytes < nbytes)) {
1258             ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1259                     nbytes, max_nbytes);
1260             continue;
1261         }
1262 
1263         dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1264                     max_nbytes, DMA_FROM_DEVICE);
1265         skb_put(skb, nbytes);
1266         __skb_queue_tail(&list, skb);
1267     }
1268 
1269     nentries = skb_queue_len(&list);
1270     while ((skb = __skb_dequeue(&list))) {
1271         ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1272                ce_state->id, skb->len);
1273         ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1274                 skb->data, skb->len);
1275 
1276         orig_len = skb->len;
1277         callback(ar, skb);
1278         skb_push(skb, orig_len - skb->len);
1279         skb_reset_tail_pointer(skb);
1280         skb_trim(skb, 0);
1281 
1282         /* let the device gain ownership of the buffer again */
1283         dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1284                        skb->len + skb_tailroom(skb),
1285                        DMA_FROM_DEVICE);
1286     }
1287     ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1288 }
1289 
1290 /* Called by lower (CE) layer when data is received from the Target. */
1291 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1292 {
1293     ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1294 }
1295 
1296 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1297 {
1298     /* CE4 polling needs to be done whenever the CE pipe which transports
1299      * HTT Rx (target->host) is processed.
1300      */
1301     ath10k_ce_per_engine_service(ce_state->ar, 4);
1302 
1303     ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1304 }
1305 
1306 /* Called by lower (CE) layer when data is received from the Target.
1307  * Only 10.4 firmware uses separate CE to transfer pktlog data.
1308  */
1309 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1310 {
1311     ath10k_pci_process_rx_cb(ce_state,
1312                  ath10k_htt_rx_pktlog_completion_handler);
1313 }
1314 
1315 /* Called by lower (CE) layer when a send to HTT Target completes. */
1316 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1317 {
1318     struct ath10k *ar = ce_state->ar;
1319     struct sk_buff *skb;
1320 
1321     while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1322         /* no need to call tx completion for NULL pointers */
1323         if (!skb)
1324             continue;
1325 
1326         dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1327                  skb->len, DMA_TO_DEVICE);
1328         ath10k_htt_hif_tx_complete(ar, skb);
1329     }
1330 }
1331 
1332 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1333 {
1334     skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1335     ath10k_htt_t2h_msg_handler(ar, skb);
1336 }
1337 
1338 /* Called by lower (CE) layer when HTT data is received from the Target. */
1339 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1340 {
1341     /* CE4 polling needs to be done whenever the CE pipe which transports
1342      * HTT Rx (target->host) is processed.
1343      */
1344     ath10k_ce_per_engine_service(ce_state->ar, 4);
1345 
1346     ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1347 }
1348 
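/* Send a scatter/gather list on a CE pipe. All but the last item are
 * queued with CE_SEND_FLAG_GATHER; the final item completes the transfer.
 * On error, the already queued entries are reverted.
 */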
1349 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1350              struct ath10k_hif_sg_item *items, int n_items)
1351 {
1352     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1353     struct ath10k_ce *ce = ath10k_ce_priv(ar);
1354     struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1355     struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1356     struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1357     unsigned int nentries_mask;
1358     unsigned int sw_index;
1359     unsigned int write_index;
1360     int err, i = 0;
1361 
1362     spin_lock_bh(&ce->ce_lock);
1363 
1364     nentries_mask = src_ring->nentries_mask;
1365     sw_index = src_ring->sw_index;
1366     write_index = src_ring->write_index;
1367 
1368     if (unlikely(CE_RING_DELTA(nentries_mask,
1369                    write_index, sw_index - 1) < n_items)) {
1370         err = -ENOBUFS;
1371         goto err;
1372     }
1373 
1374     for (i = 0; i < n_items - 1; i++) {
1375         ath10k_dbg(ar, ATH10K_DBG_PCI,
1376                "pci tx item %d paddr %pad len %d n_items %d\n",
1377                i, &items[i].paddr, items[i].len, n_items);
1378         ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1379                 items[i].vaddr, items[i].len);
1380 
1381         err = ath10k_ce_send_nolock(ce_pipe,
1382                         items[i].transfer_context,
1383                         items[i].paddr,
1384                         items[i].len,
1385                         items[i].transfer_id,
1386                         CE_SEND_FLAG_GATHER);
1387         if (err)
1388             goto err;
1389     }
1390 
1391     /* `i` is equal to `n_items - 1` after the for loop above */
1392 
1393     ath10k_dbg(ar, ATH10K_DBG_PCI,
1394            "pci tx item %d paddr %pad len %d n_items %d\n",
1395            i, &items[i].paddr, items[i].len, n_items);
1396     ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1397             items[i].vaddr, items[i].len);
1398 
1399     err = ath10k_ce_send_nolock(ce_pipe,
1400                     items[i].transfer_context,
1401                     items[i].paddr,
1402                     items[i].len,
1403                     items[i].transfer_id,
1404                     0);
1405     if (err)
1406         goto err;
1407 
1408     spin_unlock_bh(&ce->ce_lock);
1409     return 0;
1410 
1411 err:
1412     for (; i > 0; i--)
1413         __ath10k_ce_send_revert(ce_pipe);
1414 
1415     spin_unlock_bh(&ce->ce_lock);
1416     return err;
1417 }
1418 
1419 int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1420                  size_t buf_len)
1421 {
1422     return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1423 }
1424 
1425 u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1426 {
1427     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1428 
1429     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1430 
1431     return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1432 }
1433 
1434 static void ath10k_pci_dump_registers(struct ath10k *ar,
1435                       struct ath10k_fw_crash_data *crash_data)
1436 {
1437     __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1438     int i, ret;
1439 
1440     lockdep_assert_held(&ar->dump_mutex);
1441 
1442     ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1443                       hi_failure_state,
1444                       REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1445     if (ret) {
1446         ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1447         return;
1448     }
1449 
1450     BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1451 
1452     ath10k_err(ar, "firmware register dump:\n");
1453     for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1454         ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1455                i,
1456                __le32_to_cpu(reg_dump_values[i]),
1457                __le32_to_cpu(reg_dump_values[i + 1]),
1458                __le32_to_cpu(reg_dump_values[i + 2]),
1459                __le32_to_cpu(reg_dump_values[i + 3]));
1460 
1461     if (!crash_data)
1462         return;
1463 
1464     for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1465         crash_data->registers[i] = reg_dump_values[i];
1466 }
1467 
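/* Dump a memory region section by section, filling the gaps between
 * sections with ATH10K_MAGIC_NOT_COPIED, and return the resulting length.
 */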
1468 static int ath10k_pci_dump_memory_section(struct ath10k *ar,
1469                       const struct ath10k_mem_region *mem_region,
1470                       u8 *buf, size_t buf_len)
1471 {
1472     const struct ath10k_mem_section *cur_section, *next_section;
1473     unsigned int count, section_size, skip_size;
1474     int ret, i, j;
1475 
1476     if (!mem_region || !buf)
1477         return 0;
1478 
1479     cur_section = &mem_region->section_table.sections[0];
1480 
1481     if (mem_region->start > cur_section->start) {
1482         ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
1483                 mem_region->start, cur_section->start);
1484         return 0;
1485     }
1486 
1487     skip_size = cur_section->start - mem_region->start;
1488 
1489     /* fill the gap between the memory region start address and the
1490      * first register section
1491      */
1492     for (i = 0; i < skip_size; i++) {
1493         *buf = ATH10K_MAGIC_NOT_COPIED;
1494         buf++;
1495     }
1496 
1497     count = 0;
1498 
1499     for (i = 0; cur_section != NULL; i++) {
1500         section_size = cur_section->end - cur_section->start;
1501 
1502         if (section_size <= 0) {
1503             ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
1504                     cur_section->start,
1505                     cur_section->end);
1506             break;
1507         }
1508 
1509         if ((i + 1) == mem_region->section_table.size) {
1510             /* last section */
1511             next_section = NULL;
1512             skip_size = 0;
1513         } else {
1514             next_section = cur_section + 1;
1515 
1516             if (cur_section->end > next_section->start) {
1517                 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
1518                         next_section->start,
1519                         cur_section->end);
1520                 break;
1521             }
1522 
1523             skip_size = next_section->start - cur_section->end;
1524         }
1525 
1526         if (buf_len < (skip_size + section_size)) {
1527             ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
1528             break;
1529         }
1530 
1531         buf_len -= skip_size + section_size;
1532 
1533         /* read section to dest memory */
1534         ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
1535                            buf, section_size);
1536         if (ret) {
1537             ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
1538                     cur_section->start, ret);
1539             break;
1540         }
1541 
1542         buf += section_size;
1543         count += section_size;
1544 
1545         /* fill in the gap between this section and the next */
1546         for (j = 0; j < skip_size; j++) {
1547             *buf = ATH10K_MAGIC_NOT_COPIED;
1548             buf++;
1549         }
1550 
1551         count += skip_size;
1552 
1553         if (!next_section)
1554             /* this was the last section */
1555             break;
1556 
1557         cur_section = next_section;
1558     }
1559 
1560     return count;
1561 }
1562 
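/* Select the target RAM config (e.g. DRAM vs. IRAM banks) used for
 * diagnostic access and read the register back to verify the write took
 * effect.
 */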
1563 static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
1564 {
1565     u32 val;
1566 
1567     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1568                FW_RAM_CONFIG_ADDRESS, config);
1569 
1570     val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1571                 FW_RAM_CONFIG_ADDRESS);
1572     if (val != config) {
1573         ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
1574                 val, config);
1575         return -EIO;
1576     }
1577 
1578     return 0;
1579 }
1580 
1581 /* Always returns the length */
1582 static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
1583                        const struct ath10k_mem_region *region,
1584                        u8 *buf)
1585 {
1586     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1587     u32 base_addr, i;
1588 
1589     base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
1590     base_addr += region->start;
1591 
1592     for (i = 0; i < region->len; i += 4) {
1593         iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
1594         *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
1595     }
1596 
1597     return region->len;
1598 }
1599 
1600 /* if an error happened returns < 0, otherwise the length */
1601 static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
1602                       const struct ath10k_mem_region *region,
1603                       u8 *buf)
1604 {
1605     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1606     u32 i;
1607     int ret;
1608 
1609     mutex_lock(&ar->conf_mutex);
1610     if (ar->state != ATH10K_STATE_ON) {
1611         ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
1612         ret = -EIO;
1613         goto done;
1614     }
1615 
1616     for (i = 0; i < region->len; i += 4)
1617         *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
1618 
1619     ret = region->len;
1620 done:
1621     mutex_unlock(&ar->conf_mutex);
1622     return ret;
1623 }
1624 
1625 /* if an error happened returns < 0, otherwise the length */
1626 static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
1627                       const struct ath10k_mem_region *current_region,
1628                       u8 *buf)
1629 {
1630     int ret;
1631 
1632     if (current_region->section_table.size > 0)
1633         /* Copy each section individually. */
1634         return ath10k_pci_dump_memory_section(ar,
1635                               current_region,
1636                               buf,
1637                               current_region->len);
1638 
1639     /* No individual memory sections defined so we can
1640      * copy the entire memory region.
1641      */
1642     ret = ath10k_pci_diag_read_mem(ar,
1643                        current_region->start,
1644                        buf,
1645                        current_region->len);
1646     if (ret) {
1647         ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
1648                 current_region->name, ret);
1649         return ret;
1650     }
1651 
1652     return current_region->len;
1653 }
1654 
1655 static void ath10k_pci_dump_memory(struct ath10k *ar,
1656                    struct ath10k_fw_crash_data *crash_data)
1657 {
1658     const struct ath10k_hw_mem_layout *mem_layout;
1659     const struct ath10k_mem_region *current_region;
1660     struct ath10k_dump_ram_data_hdr *hdr;
1661     u32 count, shift;
1662     size_t buf_len;
1663     int ret, i;
1664     u8 *buf;
1665 
1666     lockdep_assert_held(&ar->dump_mutex);
1667 
1668     if (!crash_data)
1669         return;
1670 
1671     mem_layout = ath10k_coredump_get_mem_layout(ar);
1672     if (!mem_layout)
1673         return;
1674 
1675     current_region = &mem_layout->region_table.regions[0];
1676 
1677     buf = crash_data->ramdump_buf;
1678     buf_len = crash_data->ramdump_buf_len;
1679 
1680     memset(buf, 0, buf_len);
1681 
1682     for (i = 0; i < mem_layout->region_table.size; i++) {
1683         count = 0;
1684 
1685         if (current_region->len > buf_len) {
1686             ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
1687                     current_region->name,
1688                     current_region->len,
1689                     buf_len);
1690             break;
1691         }
1692 
1693         /* To get IRAM dump, the host driver needs to switch target
1694          * ram config from DRAM to IRAM.
1695          */
1696         if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
1697             current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
1698             shift = current_region->start >> 20;
1699 
1700             ret = ath10k_pci_set_ram_config(ar, shift);
1701             if (ret) {
1702                 ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
1703                         current_region->name, ret);
1704                 break;
1705             }
1706         }
1707 
1708         /* Reserve space for the header. */
1709         hdr = (void *)buf;
1710         buf += sizeof(*hdr);
1711         buf_len -= sizeof(*hdr);
1712 
1713         switch (current_region->type) {
1714         case ATH10K_MEM_REGION_TYPE_IOSRAM:
1715             count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
1716             break;
1717         case ATH10K_MEM_REGION_TYPE_IOREG:
1718             ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
1719             if (ret < 0)
1720                 break;
1721 
1722             count = ret;
1723             break;
1724         default:
1725             ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
1726             if (ret < 0)
1727                 break;
1728 
1729             count = ret;
1730             break;
1731         }
1732 
1733         hdr->region_type = cpu_to_le32(current_region->type);
1734         hdr->start = cpu_to_le32(current_region->start);
1735         hdr->length = cpu_to_le32(count);
1736 
1737         if (count == 0)
1738             /* Note: the header remains, just with zero length. */
1739             break;
1740 
1741         buf += count;
1742         buf_len -= count;
1743 
1744         current_region++;
1745     }
1746 }
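
/*
 * Editor's note (hedged summary, not part of the driver source): the ramdump
 * buffer produced by ath10k_pci_dump_memory() above is a sequence of
 * (header, data) pairs, one per memory region:
 *
 *     struct ath10k_dump_ram_data_hdr   <- region_type, start, length
 *     <length bytes of region data>     <- gaps between sections are filled
 *                                          with ATH10K_MAGIC_NOT_COPIED
 *
 * A region that could not be dumped keeps its header with length == 0, and
 * the loop stops there.
 */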
1747 
1748 static void ath10k_pci_fw_dump_work(struct work_struct *work)
1749 {
1750     struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
1751                          dump_work);
1752     struct ath10k_fw_crash_data *crash_data;
1753     struct ath10k *ar = ar_pci->ar;
1754     char guid[UUID_STRING_LEN + 1];
1755 
1756     mutex_lock(&ar->dump_mutex);
1757 
1758     spin_lock_bh(&ar->data_lock);
1759     ar->stats.fw_crash_counter++;
1760     spin_unlock_bh(&ar->data_lock);
1761 
1762     crash_data = ath10k_coredump_new(ar);
1763 
1764     if (crash_data)
1765         scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
1766     else
1767         scnprintf(guid, sizeof(guid), "n/a");
1768 
1769     ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
1770     ath10k_print_driver_info(ar);
1771     ath10k_pci_dump_registers(ar, crash_data);
1772     ath10k_ce_dump_registers(ar, crash_data);
1773     ath10k_pci_dump_memory(ar, crash_data);
1774 
1775     mutex_unlock(&ar->dump_mutex);
1776 
1777     ath10k_core_start_recovery(ar);
1778 }
1779 
1780 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1781 {
1782     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1783 
1784     queue_work(ar->workqueue, &ar_pci->dump_work);
1785 }
1786 
1787 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1788                     int force)
1789 {
1790     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1791 
1792     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1793 
1794     if (!force) {
1795         int resources;
1796         /*
1797          * Decide whether to actually poll for completions, or just
1798          * wait for a later chance.
1799          * If there seem to be plenty of resources left, then just wait
1800          * since checking involves reading a CE register, which is a
1801          * relatively expensive operation.
1802          */
1803         resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1804 
1805         /*
1806          * If at least 50% of the total resources are still available,
1807          * don't bother checking again yet.
1808          */
1809         if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
1810             return;
1811     }
1812     ath10k_ce_per_engine_service(ar, pipe);
1813 }
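
/*
 * Editor's note (illustrative numbers, not from the driver source): with a
 * hypothetical pipe whose src_nentries is 64, the check above only falls
 * through to ath10k_ce_per_engine_service() once fewer than 33 send slots
 * remain free (64 >> 1 == 32, and "resources > 32" means skip the poll).
 */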
1814 
1815 static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1816 {
1817     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1818 
1819     del_timer_sync(&ar_pci->rx_post_retry);
1820 }
1821 
1822 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1823                        u8 *ul_pipe, u8 *dl_pipe)
1824 {
1825     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1826     const struct ce_service_to_pipe *entry;
1827     bool ul_set = false, dl_set = false;
1828     int i;
1829 
1830     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1831 
1832     for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
1833         entry = &ar_pci->serv_to_pipe[i];
1834 
1835         if (__le32_to_cpu(entry->service_id) != service_id)
1836             continue;
1837 
1838         switch (__le32_to_cpu(entry->pipedir)) {
1839         case PIPEDIR_NONE:
1840             break;
1841         case PIPEDIR_IN:
1842             WARN_ON(dl_set);
1843             *dl_pipe = __le32_to_cpu(entry->pipenum);
1844             dl_set = true;
1845             break;
1846         case PIPEDIR_OUT:
1847             WARN_ON(ul_set);
1848             *ul_pipe = __le32_to_cpu(entry->pipenum);
1849             ul_set = true;
1850             break;
1851         case PIPEDIR_INOUT:
1852             WARN_ON(dl_set);
1853             WARN_ON(ul_set);
1854             *dl_pipe = __le32_to_cpu(entry->pipenum);
1855             *ul_pipe = __le32_to_cpu(entry->pipenum);
1856             dl_set = true;
1857             ul_set = true;
1858             break;
1859         }
1860     }
1861 
1862     if (!ul_set || !dl_set)
1863         return -ENOENT;
1864 
1865     return 0;
1866 }
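
/*
 * Editor's sketch (illustrative only, not part of the driver source): a
 * typical lookup of the uplink/downlink CE pipes for an HTC service using
 * the function above.  The service id is an example; real lookups are
 * driven from the HTC layer.
 */
static int example_lookup_htt_pipes(struct ath10k *ar)
{
	u8 ul_pipe = 0, dl_pipe = 0;
	int ret;

	ret = ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
						 &ul_pipe, &dl_pipe);
	if (ret)
		return ret;

	/* ul_pipe/dl_pipe now hold the CE pipe numbers for that service */
	return 0;
}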
1867 
1868 void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1869                      u8 *ul_pipe, u8 *dl_pipe)
1870 {
1871     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1872 
1873     (void)ath10k_pci_hif_map_service_to_pipe(ar,
1874                          ATH10K_HTC_SVC_ID_RSVD_CTRL,
1875                          ul_pipe, dl_pipe);
1876 }
1877 
1878 void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1879 {
1880     u32 val;
1881 
1882     switch (ar->hw_rev) {
1883     case ATH10K_HW_QCA988X:
1884     case ATH10K_HW_QCA9887:
1885     case ATH10K_HW_QCA6174:
1886     case ATH10K_HW_QCA9377:
1887         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1888                     CORE_CTRL_ADDRESS);
1889         val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1890         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1891                    CORE_CTRL_ADDRESS, val);
1892         break;
1893     case ATH10K_HW_QCA99X0:
1894     case ATH10K_HW_QCA9984:
1895     case ATH10K_HW_QCA9888:
1896     case ATH10K_HW_QCA4019:
1897         /* TODO: Find appropriate register configuration for QCA99X0
1898          *  to mask irq/MSI.
1899          */
1900         break;
1901     case ATH10K_HW_WCN3990:
1902         break;
1903     }
1904 }
1905 
1906 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1907 {
1908     u32 val;
1909 
1910     switch (ar->hw_rev) {
1911     case ATH10K_HW_QCA988X:
1912     case ATH10K_HW_QCA9887:
1913     case ATH10K_HW_QCA6174:
1914     case ATH10K_HW_QCA9377:
1915         val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1916                     CORE_CTRL_ADDRESS);
1917         val |= CORE_CTRL_PCIE_REG_31_MASK;
1918         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1919                    CORE_CTRL_ADDRESS, val);
1920         break;
1921     case ATH10K_HW_QCA99X0:
1922     case ATH10K_HW_QCA9984:
1923     case ATH10K_HW_QCA9888:
1924     case ATH10K_HW_QCA4019:
1925         /* TODO: Find appropriate register configuration for QCA99X0
1926          *  to unmask irq/MSI.
1927          */
1928         break;
1929     case ATH10K_HW_WCN3990:
1930         break;
1931     }
1932 }
1933 
1934 static void ath10k_pci_irq_disable(struct ath10k *ar)
1935 {
1936     ath10k_ce_disable_interrupts(ar);
1937     ath10k_pci_disable_and_clear_legacy_irq(ar);
1938     ath10k_pci_irq_msi_fw_mask(ar);
1939 }
1940 
1941 static void ath10k_pci_irq_sync(struct ath10k *ar)
1942 {
1943     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1944 
1945     synchronize_irq(ar_pci->pdev->irq);
1946 }
1947 
1948 static void ath10k_pci_irq_enable(struct ath10k *ar)
1949 {
1950     ath10k_ce_enable_interrupts(ar);
1951     ath10k_pci_enable_legacy_irq(ar);
1952     ath10k_pci_irq_msi_fw_unmask(ar);
1953 }
1954 
1955 static int ath10k_pci_hif_start(struct ath10k *ar)
1956 {
1957     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1958 
1959     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1960 
1961     ath10k_core_napi_enable(ar);
1962 
1963     ath10k_pci_irq_enable(ar);
1964     ath10k_pci_rx_post(ar);
1965 
1966     pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1967                    ar_pci->link_ctl);
1968 
1969     return 0;
1970 }
1971 
1972 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1973 {
1974     struct ath10k *ar;
1975     struct ath10k_ce_pipe *ce_pipe;
1976     struct ath10k_ce_ring *ce_ring;
1977     struct sk_buff *skb;
1978     int i;
1979 
1980     ar = pci_pipe->hif_ce_state;
1981     ce_pipe = pci_pipe->ce_hdl;
1982     ce_ring = ce_pipe->dest_ring;
1983 
1984     if (!ce_ring)
1985         return;
1986 
1987     if (!pci_pipe->buf_sz)
1988         return;
1989 
1990     for (i = 0; i < ce_ring->nentries; i++) {
1991         skb = ce_ring->per_transfer_context[i];
1992         if (!skb)
1993             continue;
1994 
1995         ce_ring->per_transfer_context[i] = NULL;
1996 
1997         dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1998                  skb->len + skb_tailroom(skb),
1999                  DMA_FROM_DEVICE);
2000         dev_kfree_skb_any(skb);
2001     }
2002 }
2003 
2004 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
2005 {
2006     struct ath10k *ar;
2007     struct ath10k_ce_pipe *ce_pipe;
2008     struct ath10k_ce_ring *ce_ring;
2009     struct sk_buff *skb;
2010     int i;
2011 
2012     ar = pci_pipe->hif_ce_state;
2013     ce_pipe = pci_pipe->ce_hdl;
2014     ce_ring = ce_pipe->src_ring;
2015 
2016     if (!ce_ring)
2017         return;
2018 
2019     if (!pci_pipe->buf_sz)
2020         return;
2021 
2022     for (i = 0; i < ce_ring->nentries; i++) {
2023         skb = ce_ring->per_transfer_context[i];
2024         if (!skb)
2025             continue;
2026 
2027         ce_ring->per_transfer_context[i] = NULL;
2028 
2029         ath10k_htc_tx_completion_handler(ar, skb);
2030     }
2031 }
2032 
2033 /*
2034  * Cleanup residual buffers for device shutdown:
2035  *    buffers that were enqueued for receive
2036  *    buffers that were to be sent
2037  * Note: Buffers that had completed but which were
2038  * not yet processed are on a completion queue. They
2039  * are handled when the completion thread shuts down.
2040  */
2041 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
2042 {
2043     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2044     int pipe_num;
2045 
2046     for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
2047         struct ath10k_pci_pipe *pipe_info;
2048 
2049         pipe_info = &ar_pci->pipe_info[pipe_num];
2050         ath10k_pci_rx_pipe_cleanup(pipe_info);
2051         ath10k_pci_tx_pipe_cleanup(pipe_info);
2052     }
2053 }
2054 
2055 void ath10k_pci_ce_deinit(struct ath10k *ar)
2056 {
2057     int i;
2058 
2059     for (i = 0; i < CE_COUNT; i++)
2060         ath10k_ce_deinit_pipe(ar, i);
2061 }
2062 
2063 void ath10k_pci_flush(struct ath10k *ar)
2064 {
2065     ath10k_pci_rx_retry_sync(ar);
2066     ath10k_pci_buffer_cleanup(ar);
2067 }
2068 
2069 static void ath10k_pci_hif_stop(struct ath10k *ar)
2070 {
2071     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2072     unsigned long flags;
2073 
2074     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
2075 
2076     ath10k_pci_irq_disable(ar);
2077     ath10k_pci_irq_sync(ar);
2078 
2079     ath10k_core_napi_sync_disable(ar);
2080 
2081     cancel_work_sync(&ar_pci->dump_work);
2082 
2083     /* Most likely the device has an HTT Rx ring configured. The only way
2084      * to prevent the device from accessing (and possibly corrupting) host
2085      * memory is to reset the chip now.
2086      *
2087      * There's also no known way of masking MSI interrupts on the device.
2088      * For ranged MSI the CE-related interrupts can be masked. However,
2089      * regardless of how many MSI interrupts are assigned, the first one
2090      * is always used for firmware indications (crashes) and cannot be
2091      * masked. To prevent the device from asserting the interrupt, reset it
2092      * before proceeding with cleanup.
2093      */
2094     ath10k_pci_safe_chip_reset(ar);
2095 
2096     ath10k_pci_flush(ar);
2097 
2098     spin_lock_irqsave(&ar_pci->ps_lock, flags);
2099     WARN_ON(ar_pci->ps_wake_refcount > 0);
2100     spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
2101 }
2102 
2103 int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2104                     void *req, u32 req_len,
2105                     void *resp, u32 *resp_len)
2106 {
2107     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2108     struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
2109     struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
2110     struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
2111     struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
2112     dma_addr_t req_paddr = 0;
2113     dma_addr_t resp_paddr = 0;
2114     struct bmi_xfer xfer = {};
2115     void *treq, *tresp = NULL;
2116     int ret = 0;
2117 
2118     might_sleep();
2119 
2120     if (resp && !resp_len)
2121         return -EINVAL;
2122 
2123     if (resp && resp_len && *resp_len == 0)
2124         return -EINVAL;
2125 
2126     treq = kmemdup(req, req_len, GFP_KERNEL);
2127     if (!treq)
2128         return -ENOMEM;
2129 
2130     req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2131     ret = dma_mapping_error(ar->dev, req_paddr);
2132     if (ret) {
2133         ret = -EIO;
2134         goto err_dma;
2135     }
2136 
2137     if (resp && resp_len) {
2138         tresp = kzalloc(*resp_len, GFP_KERNEL);
2139         if (!tresp) {
2140             ret = -ENOMEM;
2141             goto err_req;
2142         }
2143 
2144         resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
2145                         DMA_FROM_DEVICE);
2146         ret = dma_mapping_error(ar->dev, resp_paddr);
2147         if (ret) {
2148             ret = -EIO;
2149             goto err_req;
2150         }
2151 
2152         xfer.wait_for_resp = true;
2153         xfer.resp_len = 0;
2154 
2155         ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
2156     }
2157 
2158     ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
2159     if (ret)
2160         goto err_resp;
2161 
2162     ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
2163     if (ret) {
2164         dma_addr_t unused_buffer;
2165         unsigned int unused_nbytes;
2166         unsigned int unused_id;
2167 
2168         ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
2169                        &unused_nbytes, &unused_id);
2170     } else {
2171         /* the wait succeeded, i.e. we did not time out */
2172         ret = 0;
2173     }
2174 
2175 err_resp:
2176     if (resp) {
2177         dma_addr_t unused_buffer;
2178 
2179         ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
2180         dma_unmap_single(ar->dev, resp_paddr,
2181                  *resp_len, DMA_FROM_DEVICE);
2182     }
2183 err_req:
2184     dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
2185 
2186     if (ret == 0 && resp_len) {
2187         *resp_len = min(*resp_len, xfer.resp_len);
2188         memcpy(resp, tresp, *resp_len);
2189     }
2190 err_dma:
2191     kfree(treq);
2192     kfree(tresp);
2193 
2194     return ret;
2195 }
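
/*
 * Editor's sketch (illustrative only, not part of the driver source): the
 * shape of a BMI request/response round trip through the function above.
 * The request payload is a placeholder; real BMI commands are built by the
 * BMI layer.
 */
static int example_bmi_roundtrip(struct ath10k *ar)
{
	u32 req[2] = { 0, 0 };		/* hypothetical command id + argument */
	u32 resp = 0;
	u32 resp_len = sizeof(resp);

	/* blocks until the target answers or BMI_COMMUNICATION_TIMEOUT_HZ */
	return ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
					       &resp, &resp_len);
}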
2196 
2197 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
2198 {
2199     struct bmi_xfer *xfer;
2200 
2201     if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
2202         return;
2203 
2204     xfer->tx_done = true;
2205 }
2206 
2207 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
2208 {
2209     struct ath10k *ar = ce_state->ar;
2210     struct bmi_xfer *xfer;
2211     unsigned int nbytes;
2212 
2213     if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
2214                       &nbytes))
2215         return;
2216 
2217     if (WARN_ON_ONCE(!xfer))
2218         return;
2219 
2220     if (!xfer->wait_for_resp) {
2221         ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
2222         return;
2223     }
2224 
2225     xfer->resp_len = nbytes;
2226     xfer->rx_done = true;
2227 }
2228 
2229 static int ath10k_pci_bmi_wait(struct ath10k *ar,
2230                    struct ath10k_ce_pipe *tx_pipe,
2231                    struct ath10k_ce_pipe *rx_pipe,
2232                    struct bmi_xfer *xfer)
2233 {
2234     unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
2235     unsigned long started = jiffies;
2236     unsigned long dur;
2237     int ret;
2238 
2239     while (time_before_eq(jiffies, timeout)) {
2240         ath10k_pci_bmi_send_done(tx_pipe);
2241         ath10k_pci_bmi_recv_data(rx_pipe);
2242 
2243         if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
2244             ret = 0;
2245             goto out;
2246         }
2247 
2248         schedule();
2249     }
2250 
2251     ret = -ETIMEDOUT;
2252 
2253 out:
2254     dur = jiffies - started;
2255     if (dur > HZ)
2256         ath10k_dbg(ar, ATH10K_DBG_BMI,
2257                "bmi cmd took %lu jiffies hz %d ret %d\n",
2258                dur, HZ, ret);
2259     return ret;
2260 }
2261 
2262 /*
2263  * Send an interrupt to the device to wake up the Target CPU
2264  * so it has an opportunity to notice any changed state.
2265  */
2266 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
2267 {
2268     u32 addr, val;
2269 
2270     addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
2271     val = ath10k_pci_read32(ar, addr);
2272     val |= CORE_CTRL_CPU_INTR_MASK;
2273     ath10k_pci_write32(ar, addr, val);
2274 
2275     return 0;
2276 }
2277 
2278 static int ath10k_pci_get_num_banks(struct ath10k *ar)
2279 {
2280     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2281 
2282     switch (ar_pci->pdev->device) {
2283     case QCA988X_2_0_DEVICE_ID_UBNT:
2284     case QCA988X_2_0_DEVICE_ID:
2285     case QCA99X0_2_0_DEVICE_ID:
2286     case QCA9888_2_0_DEVICE_ID:
2287     case QCA9984_1_0_DEVICE_ID:
2288     case QCA9887_1_0_DEVICE_ID:
2289         return 1;
2290     case QCA6164_2_1_DEVICE_ID:
2291     case QCA6174_2_1_DEVICE_ID:
2292         switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
2293         case QCA6174_HW_1_0_CHIP_ID_REV:
2294         case QCA6174_HW_1_1_CHIP_ID_REV:
2295         case QCA6174_HW_2_1_CHIP_ID_REV:
2296         case QCA6174_HW_2_2_CHIP_ID_REV:
2297             return 3;
2298         case QCA6174_HW_1_3_CHIP_ID_REV:
2299             return 2;
2300         case QCA6174_HW_3_0_CHIP_ID_REV:
2301         case QCA6174_HW_3_1_CHIP_ID_REV:
2302         case QCA6174_HW_3_2_CHIP_ID_REV:
2303             return 9;
2304         }
2305         break;
2306     case QCA9377_1_0_DEVICE_ID:
2307         return 9;
2308     }
2309 
2310     ath10k_warn(ar, "unknown number of banks, assuming 1\n");
2311     return 1;
2312 }
2313 
2314 static int ath10k_bus_get_num_banks(struct ath10k *ar)
2315 {
2316     struct ath10k_ce *ce = ath10k_ce_priv(ar);
2317 
2318     return ce->bus_ops->get_num_banks(ar);
2319 }
2320 
2321 int ath10k_pci_init_config(struct ath10k *ar)
2322 {
2323     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2324     u32 interconnect_targ_addr;
2325     u32 pcie_state_targ_addr = 0;
2326     u32 pipe_cfg_targ_addr = 0;
2327     u32 svc_to_pipe_map = 0;
2328     u32 pcie_config_flags = 0;
2329     u32 ealloc_value;
2330     u32 ealloc_targ_addr;
2331     u32 flag2_value;
2332     u32 flag2_targ_addr;
2333     int ret = 0;
2334 
2335     /* Download to Target the CE Config and the service-to-CE map */
2336     interconnect_targ_addr =
2337         host_interest_item_address(HI_ITEM(hi_interconnect_state));
2338 
2339     /* Supply Target-side CE configuration */
2340     ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2341                      &pcie_state_targ_addr);
2342     if (ret != 0) {
2343         ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2344         return ret;
2345     }
2346 
2347     if (pcie_state_targ_addr == 0) {
2348         ret = -EIO;
2349         ath10k_err(ar, "Invalid pcie state addr\n");
2350         return ret;
2351     }
2352 
2353     ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2354                       offsetof(struct pcie_state,
2355                            pipe_cfg_addr)),
2356                      &pipe_cfg_targ_addr);
2357     if (ret != 0) {
2358         ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2359         return ret;
2360     }
2361 
2362     if (pipe_cfg_targ_addr == 0) {
2363         ret = -EIO;
2364         ath10k_err(ar, "Invalid pipe cfg addr\n");
2365         return ret;
2366     }
2367 
2368     ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2369                     ar_pci->pipe_config,
2370                     sizeof(struct ce_pipe_config) *
2371                     NUM_TARGET_CE_CONFIG_WLAN);
2372 
2373     if (ret != 0) {
2374         ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2375         return ret;
2376     }
2377 
2378     ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2379                       offsetof(struct pcie_state,
2380                            svc_to_pipe_map)),
2381                      &svc_to_pipe_map);
2382     if (ret != 0) {
2383         ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2384         return ret;
2385     }
2386 
2387     if (svc_to_pipe_map == 0) {
2388         ret = -EIO;
2389         ath10k_err(ar, "Invalid svc_to_pipe map\n");
2390         return ret;
2391     }
2392 
2393     ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2394                     ar_pci->serv_to_pipe,
2395                     sizeof(pci_target_service_to_ce_map_wlan));
2396     if (ret != 0) {
2397         ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2398         return ret;
2399     }
2400 
2401     ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2402                       offsetof(struct pcie_state,
2403                            config_flags)),
2404                      &pcie_config_flags);
2405     if (ret != 0) {
2406         ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2407         return ret;
2408     }
2409 
2410     pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2411 
2412     ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2413                        offsetof(struct pcie_state,
2414                             config_flags)),
2415                       pcie_config_flags);
2416     if (ret != 0) {
2417         ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2418         return ret;
2419     }
2420 
2421     /* configure early allocation */
2422     ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2423 
2424     ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2425     if (ret != 0) {
2426         ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2427         return ret;
2428     }
2429 
2430     /* first bank is switched to IRAM */
2431     ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2432              HI_EARLY_ALLOC_MAGIC_MASK);
2433     ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2434               HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2435              HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2436 
2437     ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2438     if (ret != 0) {
2439         ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2440         return ret;
2441     }
2442 
2443     /* Tell Target to proceed with initialization */
2444     flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2445 
2446     ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2447     if (ret != 0) {
2448         ath10k_err(ar, "Failed to get option val: %d\n", ret);
2449         return ret;
2450     }
2451 
2452     flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2453 
2454     ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2455     if (ret != 0) {
2456         ath10k_err(ar, "Failed to set option val: %d\n", ret);
2457         return ret;
2458     }
2459 
2460     return 0;
2461 }
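
/*
 * Editor's note (hedged illustration, not part of the driver source): the
 * host-interest accesses above all follow the same pattern: resolve the
 * target address with host_interest_item_address(HI_ITEM(field)), then
 * read-modify-write it through the diagnostic window.  For example, the
 * early-alloc value is composed roughly as
 *
 *     value  = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT)
 *              & HI_EARLY_ALLOC_MAGIC_MASK;
 *     value |= (num_iram_banks << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
 *              & HI_EARLY_ALLOC_IRAM_BANKS_MASK;
 *
 * where num_iram_banks comes from ath10k_bus_get_num_banks().
 */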
2462 
2463 static void ath10k_pci_override_ce_config(struct ath10k *ar)
2464 {
2465     struct ce_attr *attr;
2466     struct ce_pipe_config *config;
2467     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2468 
2469     /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2470      * since it is currently used for another feature.
2471      */
2472 
2473     /* Override Host's Copy Engine 5 configuration */
2474     attr = &ar_pci->attr[5];
2475     attr->src_sz_max = 0;
2476     attr->dest_nentries = 0;
2477 
2478     /* Override Target firmware's Copy Engine configuration */
2479     config = &ar_pci->pipe_config[5];
2480     config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2481     config->nbytes_max = __cpu_to_le32(2048);
2482 
2483     /* Map from service/endpoint to Copy Engine */
2484     ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
2485 }
2486 
2487 int ath10k_pci_alloc_pipes(struct ath10k *ar)
2488 {
2489     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2490     struct ath10k_pci_pipe *pipe;
2491     struct ath10k_ce *ce = ath10k_ce_priv(ar);
2492     int i, ret;
2493 
2494     for (i = 0; i < CE_COUNT; i++) {
2495         pipe = &ar_pci->pipe_info[i];
2496         pipe->ce_hdl = &ce->ce_states[i];
2497         pipe->pipe_num = i;
2498         pipe->hif_ce_state = ar;
2499 
2500         ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
2501         if (ret) {
2502             ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2503                    i, ret);
2504             return ret;
2505         }
2506 
2507         /* Last CE is Diagnostic Window */
2508         if (i == CE_DIAG_PIPE) {
2509             ar_pci->ce_diag = pipe->ce_hdl;
2510             continue;
2511         }
2512 
2513         pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
2514     }
2515 
2516     return 0;
2517 }
2518 
2519 void ath10k_pci_free_pipes(struct ath10k *ar)
2520 {
2521     int i;
2522 
2523     for (i = 0; i < CE_COUNT; i++)
2524         ath10k_ce_free_pipe(ar, i);
2525 }
2526 
2527 int ath10k_pci_init_pipes(struct ath10k *ar)
2528 {
2529     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2530     int i, ret;
2531 
2532     for (i = 0; i < CE_COUNT; i++) {
2533         ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
2534         if (ret) {
2535             ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2536                    i, ret);
2537             return ret;
2538         }
2539     }
2540 
2541     return 0;
2542 }
2543 
2544 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2545 {
2546     return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2547            FW_IND_EVENT_PENDING;
2548 }
2549 
2550 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2551 {
2552     u32 val;
2553 
2554     val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2555     val &= ~FW_IND_EVENT_PENDING;
2556     ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2557 }
2558 
2559 static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2560 {
2561     u32 val;
2562 
2563     val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2564     return (val == 0xffffffff);
2565 }
2566 
2567 /* this function effectively clears target memory controller assert line */
2568 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2569 {
2570     u32 val;
2571 
2572     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2573     ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2574                    val | SOC_RESET_CONTROL_SI0_RST_MASK);
2575     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2576 
2577     msleep(10);
2578 
2579     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2580     ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2581                    val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2582     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2583 
2584     msleep(10);
2585 }
2586 
2587 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2588 {
2589     u32 val;
2590 
2591     ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2592 
2593     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2594     ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2595                    val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2596 }
2597 
2598 static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2599 {
2600     u32 val;
2601 
2602     val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2603 
2604     ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2605                    val | SOC_RESET_CONTROL_CE_RST_MASK);
2606     msleep(10);
2607     ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2608                    val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2609 }
2610 
2611 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2612 {
2613     u32 val;
2614 
2615     val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
2616     ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
2617                    val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2618 }
2619 
2620 static int ath10k_pci_warm_reset(struct ath10k *ar)
2621 {
2622     int ret;
2623 
2624     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2625 
2626     spin_lock_bh(&ar->data_lock);
2627     ar->stats.fw_warm_reset_counter++;
2628     spin_unlock_bh(&ar->data_lock);
2629 
2630     ath10k_pci_irq_disable(ar);
2631 
2632     /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2633      * were to access the copy engine while the host performs a copy engine
2634      * reset, it could confuse the PCI-E controller to the point of
2635      * bringing the host system to a complete stop (i.e. hang).
2636      */
2637     ath10k_pci_warm_reset_si0(ar);
2638     ath10k_pci_warm_reset_cpu(ar);
2639     ath10k_pci_init_pipes(ar);
2640     ath10k_pci_wait_for_target_init(ar);
2641 
2642     ath10k_pci_warm_reset_clear_lf(ar);
2643     ath10k_pci_warm_reset_ce(ar);
2644     ath10k_pci_warm_reset_cpu(ar);
2645     ath10k_pci_init_pipes(ar);
2646 
2647     ret = ath10k_pci_wait_for_target_init(ar);
2648     if (ret) {
2649         ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2650         return ret;
2651     }
2652 
2653     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2654 
2655     return 0;
2656 }
2657 
2658 static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2659 {
2660     ath10k_pci_irq_disable(ar);
2661     return ath10k_pci_qca99x0_chip_reset(ar);
2662 }
2663 
2664 static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2665 {
2666     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2667 
2668     if (!ar_pci->pci_soft_reset)
2669         return -ENOTSUPP;
2670 
2671     return ar_pci->pci_soft_reset(ar);
2672 }
2673 
2674 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2675 {
2676     int i, ret;
2677     u32 val;
2678 
2679     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2680 
2681     /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2682      * It is thus preferred to use warm reset which is safer but may not be
2683      * able to recover the device from all possible fail scenarios.
2684      *
2685      * Warm reset doesn't always work on first try so attempt it a few
2686      * times before giving up.
2687      */
2688     for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2689         ret = ath10k_pci_warm_reset(ar);
2690         if (ret) {
2691             ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2692                     i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2693                     ret);
2694             continue;
2695         }
2696 
2697         /* FIXME: Sometimes copy engine doesn't recover after warm
2698          * reset. In most cases this needs cold reset. In some of these
2699          * cases the device is in such a state that a cold reset may
2700          * lock up the host.
2701          *
2702          * Reading any host interest register via copy engine is
2703          * sufficient to verify if device is capable of booting
2704          * firmware blob.
2705          */
2706         ret = ath10k_pci_init_pipes(ar);
2707         if (ret) {
2708             ath10k_warn(ar, "failed to init copy engine: %d\n",
2709                     ret);
2710             continue;
2711         }
2712 
2713         ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2714                          &val);
2715         if (ret) {
2716             ath10k_warn(ar, "failed to poke copy engine: %d\n",
2717                     ret);
2718             continue;
2719         }
2720 
2721         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2722         return 0;
2723     }
2724 
2725     if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2726         ath10k_warn(ar, "refusing cold reset as requested\n");
2727         return -EPERM;
2728     }
2729 
2730     ret = ath10k_pci_cold_reset(ar);
2731     if (ret) {
2732         ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2733         return ret;
2734     }
2735 
2736     ret = ath10k_pci_wait_for_target_init(ar);
2737     if (ret) {
2738         ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2739                 ret);
2740         return ret;
2741     }
2742 
2743     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2744 
2745     return 0;
2746 }
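
/*
 * Editor's note (hedged summary, not part of the driver source): the QCA988X
 * reset strategy above is: try a warm reset up to
 * ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS times, verifying each attempt by
 * re-initialising the copy engine pipes and reading a host-interest register
 * through the diagnostic window; only if all warm attempts fail (and
 * reset_mode allows it) fall back to a cold reset.
 */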
2747 
2748 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2749 {
2750     int ret;
2751 
2752     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2753 
2754     /* FIXME: QCA6174 requires cold + warm reset to work. */
2755 
2756     ret = ath10k_pci_cold_reset(ar);
2757     if (ret) {
2758         ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2759         return ret;
2760     }
2761 
2762     ret = ath10k_pci_wait_for_target_init(ar);
2763     if (ret) {
2764         ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2765                 ret);
2766         return ret;
2767     }
2768 
2769     ret = ath10k_pci_warm_reset(ar);
2770     if (ret) {
2771         ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2772         return ret;
2773     }
2774 
2775     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
2776 
2777     return 0;
2778 }
2779 
2780 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2781 {
2782     int ret;
2783 
2784     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2785 
2786     ret = ath10k_pci_cold_reset(ar);
2787     if (ret) {
2788         ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2789         return ret;
2790     }
2791 
2792     ret = ath10k_pci_wait_for_target_init(ar);
2793     if (ret) {
2794         ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2795                 ret);
2796         return ret;
2797     }
2798 
2799     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2800 
2801     return 0;
2802 }
2803 
2804 static int ath10k_pci_chip_reset(struct ath10k *ar)
2805 {
2806     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2807 
2808     if (WARN_ON(!ar_pci->pci_hard_reset))
2809         return -ENOTSUPP;
2810 
2811     return ar_pci->pci_hard_reset(ar);
2812 }
2813 
2814 static int ath10k_pci_hif_power_up(struct ath10k *ar,
2815                    enum ath10k_firmware_mode fw_mode)
2816 {
2817     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2818     int ret;
2819 
2820     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2821 
2822     pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2823                   &ar_pci->link_ctl);
2824     pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2825                    ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2826 
2827     /*
2828      * Bring the target up cleanly.
2829      *
2830      * The target may be in an undefined state with an AUX-powered Target
2831      * and a Host in WoW mode. If the Host crashes, loses power, or is
2832      * restarted (without unloading the driver) then the Target is left
2833      * (aux) powered and running. On a subsequent driver load, the Target
2834      * is in an unexpected state. We try to catch that here in order to
2835      * reset the Target and retry the probe.
2836      */
2837     ret = ath10k_pci_chip_reset(ar);
2838     if (ret) {
2839         if (ath10k_pci_has_fw_crashed(ar)) {
2840             ath10k_warn(ar, "firmware crashed during chip reset\n");
2841             ath10k_pci_fw_crashed_clear(ar);
2842             ath10k_pci_fw_crashed_dump(ar);
2843         }
2844 
2845         ath10k_err(ar, "failed to reset chip: %d\n", ret);
2846         goto err_sleep;
2847     }
2848 
2849     ret = ath10k_pci_init_pipes(ar);
2850     if (ret) {
2851         ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2852         goto err_sleep;
2853     }
2854 
2855     ret = ath10k_pci_init_config(ar);
2856     if (ret) {
2857         ath10k_err(ar, "failed to setup init config: %d\n", ret);
2858         goto err_ce;
2859     }
2860 
2861     ret = ath10k_pci_wake_target_cpu(ar);
2862     if (ret) {
2863         ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2864         goto err_ce;
2865     }
2866 
2867     return 0;
2868 
2869 err_ce:
2870     ath10k_pci_ce_deinit(ar);
2871 
2872 err_sleep:
2873     return ret;
2874 }
2875 
2876 void ath10k_pci_hif_power_down(struct ath10k *ar)
2877 {
2878     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2879 
2880     /* Currently hif_power_up effectively performs a reset and hif_stop
2881      * resets the chip as well, so there's no point in resetting here.
2882      */
2883 }
2884 
2885 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2886 {
2887     /* Nothing to do; the important stuff is in the driver suspend. */
2888     return 0;
2889 }
2890 
2891 static int ath10k_pci_suspend(struct ath10k *ar)
2892 {
2893     /* The grace timer can still be counting down and ar->ps_awake be true.
2894      * It is known that the device may be asleep after resuming regardless
2895      * of the SoC powersave state before suspending. Hence make sure the
2896      * device is asleep before proceeding.
2897      */
2898     ath10k_pci_sleep_sync(ar);
2899 
2900     return 0;
2901 }
2902 
2903 static int ath10k_pci_hif_resume(struct ath10k *ar)
2904 {
2905     /* Nothing to do; the important stuff is in the driver resume. */
2906     return 0;
2907 }
2908 
2909 static int ath10k_pci_resume(struct ath10k *ar)
2910 {
2911     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2912     struct pci_dev *pdev = ar_pci->pdev;
2913     u32 val;
2914     int ret = 0;
2915 
2916     ret = ath10k_pci_force_wake(ar);
2917     if (ret) {
2918         ath10k_err(ar, "failed to wake up target: %d\n", ret);
2919         return ret;
2920     }
2921 
2922     /* Suspend/Resume resets the PCI configuration space, so we have to
2923      * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2924      * from interfering with C3 CPU state. pci_restore_state won't help
2925      * here since it only restores the first 64 bytes of the pci config header.
2926      */
2927     pci_read_config_dword(pdev, 0x40, &val);
2928     if ((val & 0x0000ff00) != 0)
2929         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2930 
2931     return ret;
2932 }
2933 
2934 static bool ath10k_pci_validate_cal(void *data, size_t size)
2935 {
2936     __le16 *cal_words = data;
2937     u16 checksum = 0;
2938     size_t i;
2939 
2940     if (size % 2 != 0)
2941         return false;
2942 
2943     for (i = 0; i < size / 2; i++)
2944         checksum ^= le16_to_cpu(cal_words[i]);
2945 
2946     return checksum == 0xffff;
2947 }
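
/*
 * Editor's sketch (illustrative only, not part of the driver source): the
 * complement of the check in ath10k_pci_validate_cal() above.  A blob passes
 * when the XOR of all little-endian 16-bit words equals 0xffff, so a tool
 * generating calibration data can reserve one word and set it to the value
 * computed below.  The function name is made up for this note.
 */
static inline u16 example_cal_xor_fixup(const __le16 *words, size_t n_words)
{
	u16 acc = 0;
	size_t i;

	/* XOR of every word except the last, which is reserved for the fixup */
	for (i = 0; i + 1 < n_words; i++)
		acc ^= le16_to_cpu(words[i]);

	/* storing this in the last word makes the XOR of all words 0xffff */
	return acc ^ 0xffff;
}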
2948 
2949 static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2950 {
2951     /* Enable SI clock */
2952     ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2953 
2954     /* Configure GPIOs for I2C operation */
2955     ath10k_pci_write32(ar,
2956                GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2957                4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2958                SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2959                   GPIO_PIN0_CONFIG) |
2960                SM(1, GPIO_PIN0_PAD_PULL));
2961 
2962     ath10k_pci_write32(ar,
2963                GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2964                4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2965                SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2966                SM(1, GPIO_PIN0_PAD_PULL));
2967 
2968     ath10k_pci_write32(ar,
2969                GPIO_BASE_ADDRESS +
2970                QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2971                1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2972 
2973     /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
2974     ath10k_pci_write32(ar,
2975                SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2976                SM(1, SI_CONFIG_ERR_INT) |
2977                SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2978                SM(1, SI_CONFIG_I2C) |
2979                SM(1, SI_CONFIG_POS_SAMPLE) |
2980                SM(1, SI_CONFIG_INACTIVE_DATA) |
2981                SM(1, SI_CONFIG_INACTIVE_CLK) |
2982                SM(8, SI_CONFIG_DIVIDER));
2983 }
2984 
2985 static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2986 {
2987     u32 reg;
2988     int wait_limit;
2989 
2990     /* set the device select byte and the address for the read operation */
2991     reg = QCA9887_EEPROM_SELECT_READ |
2992           SM(addr, QCA9887_EEPROM_ADDR_LO) |
2993           SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2994     ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2995 
2996     /* write transmit data, transfer length, and START bit */
2997     ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2998                SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2999                SM(4, SI_CS_TX_CNT));
3000 
3001     /* wait max 1 sec */
3002     wait_limit = 100000;
3003 
3004     /* wait for SI_CS_DONE_INT */
3005     do {
3006         reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
3007         if (MS(reg, SI_CS_DONE_INT))
3008             break;
3009 
3010         wait_limit--;
3011         udelay(10);
3012     } while (wait_limit > 0);
3013 
3014     if (!MS(reg, SI_CS_DONE_INT)) {
3015         ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
3016                addr);
3017         return -ETIMEDOUT;
3018     }
3019 
3020     /* clear SI_CS_DONE_INT */
3021     ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
3022 
3023     if (MS(reg, SI_CS_DONE_ERR)) {
3024         ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
3025         return -EIO;
3026     }
3027 
3028     /* extract receive data */
3029     reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
3030     *out = reg;
3031 
3032     return 0;
3033 }
3034 
3035 static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
3036                        size_t *data_len)
3037 {
3038     u8 *caldata = NULL;
3039     size_t calsize, i;
3040     int ret;
3041 
3042     if (!QCA_REV_9887(ar))
3043         return -EOPNOTSUPP;
3044 
3045     calsize = ar->hw_params.cal_data_len;
3046     caldata = kmalloc(calsize, GFP_KERNEL);
3047     if (!caldata)
3048         return -ENOMEM;
3049 
3050     ath10k_pci_enable_eeprom(ar);
3051 
3052     for (i = 0; i < calsize; i++) {
3053         ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
3054         if (ret)
3055             goto err_free;
3056     }
3057 
3058     if (!ath10k_pci_validate_cal(caldata, calsize))
3059         goto err_free;
3060 
3061     *data = caldata;
3062     *data_len = calsize;
3063 
3064     return 0;
3065 
3066 err_free:
3067     kfree(caldata);
3068 
3069     return -EINVAL;
3070 }
3071 
3072 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
3073     .tx_sg          = ath10k_pci_hif_tx_sg,
3074     .diag_read      = ath10k_pci_hif_diag_read,
3075     .diag_write     = ath10k_pci_diag_write_mem,
3076     .exchange_bmi_msg   = ath10k_pci_hif_exchange_bmi_msg,
3077     .start          = ath10k_pci_hif_start,
3078     .stop           = ath10k_pci_hif_stop,
3079     .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
3080     .get_default_pipe   = ath10k_pci_hif_get_default_pipe,
3081     .send_complete_check    = ath10k_pci_hif_send_complete_check,
3082     .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
3083     .power_up       = ath10k_pci_hif_power_up,
3084     .power_down     = ath10k_pci_hif_power_down,
3085     .read32         = ath10k_pci_read32,
3086     .write32        = ath10k_pci_write32,
3087     .suspend        = ath10k_pci_hif_suspend,
3088     .resume         = ath10k_pci_hif_resume,
3089     .fetch_cal_eeprom   = ath10k_pci_hif_fetch_cal_eeprom,
3090 };
3091 
3092 /*
3093  * Top-level interrupt handler for all PCI interrupts from a Target.
3094  * When a block of MSI interrupts is allocated, this top-level handler
3095  * is not used; instead, we directly call the correct sub-handler.
3096  */
3097 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
3098 {
3099     struct ath10k *ar = arg;
3100     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3101     int ret;
3102 
3103     if (ath10k_pci_has_device_gone(ar))
3104         return IRQ_NONE;
3105 
3106     ret = ath10k_pci_force_wake(ar);
3107     if (ret) {
3108         ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
3109         return IRQ_NONE;
3110     }
3111 
3112     if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
3113         !ath10k_pci_irq_pending(ar))
3114         return IRQ_NONE;
3115 
3116     ath10k_pci_disable_and_clear_legacy_irq(ar);
3117     ath10k_pci_irq_msi_fw_mask(ar);
3118     napi_schedule(&ar->napi);
3119 
3120     return IRQ_HANDLED;
3121 }
3122 
3123 static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
3124 {
3125     struct ath10k *ar = container_of(ctx, struct ath10k, napi);
3126     int done = 0;
3127 
3128     if (ath10k_pci_has_fw_crashed(ar)) {
3129         ath10k_pci_fw_crashed_clear(ar);
3130         ath10k_pci_fw_crashed_dump(ar);
3131         napi_complete(ctx);
3132         return done;
3133     }
3134 
3135     ath10k_ce_per_engine_service_any(ar);
3136 
3137     done = ath10k_htt_txrx_compl_task(ar, budget);
3138 
3139     if (done < budget) {
3140         napi_complete_done(ctx, done);
3141         /* In case of MSI, it is possible that interrupts are received
3142          * while NAPI poll is in progress. So pending interrupts that are
3143          * received after processing all copy engine pipes by NAPI poll
3144          * will not be handled again. This can cause the boot sequence to
3145          * fail on x86 platforms. So before enabling interrupts it is
3146          * safer to check for pending interrupts and service them
3147          * immediately.
3148          */
3149         if (ath10k_ce_interrupt_summary(ar)) {
3150             napi_reschedule(ctx);
3151             goto out;
3152         }
3153         ath10k_pci_enable_legacy_irq(ar);
3154         ath10k_pci_irq_msi_fw_unmask(ar);
3155     }
3156 
3157 out:
3158     return done;
3159 }
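
/*
 * Editor's note (hedged, not part of the driver source): the poll function
 * above follows the usual NAPI contract: service up to 'budget' completions,
 * call napi_complete_done() only when the work finished early, and re-arm
 * the hardware interrupt sources afterwards.  The extra
 * ath10k_ce_interrupt_summary() check closes the window where an interrupt
 * fired between the final poll pass and re-enabling the IRQ sources.
 */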
3160 
3161 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
3162 {
3163     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3164     int ret;
3165 
3166     ret = request_irq(ar_pci->pdev->irq,
3167               ath10k_pci_interrupt_handler,
3168               IRQF_SHARED, "ath10k_pci", ar);
3169     if (ret) {
3170         ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
3171                 ar_pci->pdev->irq, ret);
3172         return ret;
3173     }
3174 
3175     return 0;
3176 }
3177 
3178 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
3179 {
3180     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3181     int ret;
3182 
3183     ret = request_irq(ar_pci->pdev->irq,
3184               ath10k_pci_interrupt_handler,
3185               IRQF_SHARED, "ath10k_pci", ar);
3186     if (ret) {
3187         ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
3188                 ar_pci->pdev->irq, ret);
3189         return ret;
3190     }
3191 
3192     return 0;
3193 }
3194 
3195 static int ath10k_pci_request_irq(struct ath10k *ar)
3196 {
3197     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3198 
3199     switch (ar_pci->oper_irq_mode) {
3200     case ATH10K_PCI_IRQ_LEGACY:
3201         return ath10k_pci_request_irq_legacy(ar);
3202     case ATH10K_PCI_IRQ_MSI:
3203         return ath10k_pci_request_irq_msi(ar);
3204     default:
3205         return -EINVAL;
3206     }
3207 }
3208 
3209 static void ath10k_pci_free_irq(struct ath10k *ar)
3210 {
3211     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3212 
3213     free_irq(ar_pci->pdev->irq, ar);
3214 }
3215 
3216 void ath10k_pci_init_napi(struct ath10k *ar)
3217 {
3218     netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
3219                NAPI_POLL_WEIGHT);
3220 }
3221 
3222 static int ath10k_pci_init_irq(struct ath10k *ar)
3223 {
3224     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3225     int ret;
3226 
3227     ath10k_pci_init_napi(ar);
3228 
3229     if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
3230         ath10k_info(ar, "limiting irq mode to: %d\n",
3231                 ath10k_pci_irq_mode);
3232 
3233     /* Try MSI */
3234     if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
3235         ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
3236         ret = pci_enable_msi(ar_pci->pdev);
3237         if (ret == 0)
3238             return 0;
3239 
3240         /* MSI failed, try legacy irq next */
3241     }
3242 
3243     /* Try legacy irq
3244      *
3245      * A potential race occurs here: the CORE_BASE write depends on the
3246      * target correctly decoding the AXI address, but the host has no way
3247      * of knowing when the target has written its BAR to CORE_CTRL. The
3248      * write might get lost if the target has NOT yet written the BAR.
3249      * For now, fix the race by repeating the write in the
3250      * synchronization checking below.
3251      */
3252     ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
3253 
3254     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3255                PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3256 
3257     return 0;
3258 }
3259 
3260 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
3261 {
3262     ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
3263                0);
3264 }
3265 
3266 static int ath10k_pci_deinit_irq(struct ath10k *ar)
3267 {
3268     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3269 
3270     switch (ar_pci->oper_irq_mode) {
3271     case ATH10K_PCI_IRQ_LEGACY:
3272         ath10k_pci_deinit_irq_legacy(ar);
3273         break;
3274     default:
3275         pci_disable_msi(ar_pci->pdev);
3276         break;
3277     }
3278 
3279     return 0;
3280 }
3281 
3282 int ath10k_pci_wait_for_target_init(struct ath10k *ar)
3283 {
3284     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3285     unsigned long timeout;
3286     u32 val;
3287 
3288     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
3289 
3290     timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
3291 
3292     do {
3293         val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
3294 
3295         ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
3296                val);
3297 
3298         /* target should never return this */
3299         if (val == 0xffffffff)
3300             continue;
3301 
3302         /* the device has crashed so don't bother trying anymore */
3303         if (val & FW_IND_EVENT_PENDING)
3304             break;
3305 
3306         if (val & FW_IND_INITIALIZED)
3307             break;
3308 
3309         if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
3310             /* Fix potential race by repeating CORE_BASE writes */
3311             ath10k_pci_enable_legacy_irq(ar);
3312 
3313         mdelay(10);
3314     } while (time_before(jiffies, timeout));
3315 
3316     ath10k_pci_disable_and_clear_legacy_irq(ar);
3317     ath10k_pci_irq_msi_fw_mask(ar);
3318 
3319     if (val == 0xffffffff) {
3320         ath10k_err(ar, "failed to read device register, device is gone\n");
3321         return -EIO;
3322     }
3323 
3324     if (val & FW_IND_EVENT_PENDING) {
3325         ath10k_warn(ar, "device has crashed during init\n");
3326         return -ECOMM;
3327     }
3328 
3329     if (!(val & FW_IND_INITIALIZED)) {
3330         ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
3331                val);
3332         return -ETIMEDOUT;
3333     }
3334 
3335     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
3336     return 0;
3337 }
3338 
3339 static int ath10k_pci_cold_reset(struct ath10k *ar)
3340 {
3341     u32 val;
3342 
3343     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3344 
3345     spin_lock_bh(&ar->data_lock);
3346 
3347     ar->stats.fw_cold_reset_counter++;
3348 
3349     spin_unlock_bh(&ar->data_lock);
3350 
3351     /* Put Target, including PCIe, into RESET. */
3352     val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3353     val |= 1;
3354     ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3355 
3356     /* After writing into SOC_GLOBAL_RESET to put the device into reset
3357      * and then pulling it out of reset, PCIe may not be stable for
3358      * immediate register access and can cause bus errors. Add a delay
3359      * before any PCIe access to avoid this issue.
3360      */
3361     msleep(20);
3362 
3363     /* Pull Target, including PCIe, out of RESET. */
3364     val &= ~1;
3365     ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3366 
3367     msleep(20);
3368 
3369     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3370 
3371     return 0;
3372 }
3373 
3374 static int ath10k_pci_claim(struct ath10k *ar)
3375 {
3376     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3377     struct pci_dev *pdev = ar_pci->pdev;
3378     int ret;
3379 
3380     pci_set_drvdata(pdev, ar);
3381 
3382     ret = pci_enable_device(pdev);
3383     if (ret) {
3384         ath10k_err(ar, "failed to enable pci device: %d\n", ret);
3385         return ret;
3386     }
3387 
3388     ret = pci_request_region(pdev, BAR_NUM, "ath");
3389     if (ret) {
3390         ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
3391                ret);
3392         goto err_device;
3393     }
3394 
3395     /* Target expects 32 bit DMA. Enforce it. */
3396     ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3397     if (ret) {
3398         ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
3399         goto err_region;
3400     }
3401 
3402     pci_set_master(pdev);
3403 
3404     /* Arrange for access to Target SoC registers. */
3405     ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
3406     ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
3407     if (!ar_pci->mem) {
3408         ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
3409         ret = -EIO;
3410         goto err_master;
3411     }
3412 
3413     ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
3414     return 0;
3415 
3416 err_master:
3417     pci_clear_master(pdev);
3418 
3419 err_region:
3420     pci_release_region(pdev, BAR_NUM);
3421 
3422 err_device:
3423     pci_disable_device(pdev);
3424 
3425     return ret;
3426 }
3427 
3428 static void ath10k_pci_release(struct ath10k *ar)
3429 {
3430     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3431     struct pci_dev *pdev = ar_pci->pdev;
3432 
3433     pci_iounmap(pdev, ar_pci->mem);
3434     pci_release_region(pdev, BAR_NUM);
3435     pci_clear_master(pdev);
3436     pci_disable_device(pdev);
3437 }
3438 
3439 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
3440 {
3441     const struct ath10k_pci_supp_chip *supp_chip;
3442     int i;
3443     u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
3444 
3445     for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
3446         supp_chip = &ath10k_pci_supp_chips[i];
3447 
3448         if (supp_chip->dev_id == dev_id &&
3449             supp_chip->rev_id == rev_id)
3450             return true;
3451     }
3452 
3453     return false;
3454 }
3455 
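/* Set up per-device state: locks, the firmware dump worker and the rx
 * replenish retry timer, plus private copies of the CE attribute, pipe
 * and service-to-pipe tables so QCA6174/QCA9377 can override the copy
 * engine layout before the pipes are allocated.
 */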
3456 int ath10k_pci_setup_resource(struct ath10k *ar)
3457 {
3458     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3459     struct ath10k_ce *ce = ath10k_ce_priv(ar);
3460     int ret;
3461 
3462     spin_lock_init(&ce->ce_lock);
3463     spin_lock_init(&ar_pci->ps_lock);
3464     mutex_init(&ar_pci->ce_diag_mutex);
3465 
3466     INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
3467 
3468     timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
3469 
3470     ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
3471                    sizeof(pci_host_ce_config_wlan),
3472                    GFP_KERNEL);
3473     if (!ar_pci->attr)
3474         return -ENOMEM;
3475 
3476     ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
3477                       sizeof(pci_target_ce_config_wlan),
3478                       GFP_KERNEL);
3479     if (!ar_pci->pipe_config) {
3480         ret = -ENOMEM;
3481         goto err_free_attr;
3482     }
3483 
3484     ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
3485                        sizeof(pci_target_service_to_ce_map_wlan),
3486                        GFP_KERNEL);
3487     if (!ar_pci->serv_to_pipe) {
3488         ret = -ENOMEM;
3489         goto err_free_pipe_config;
3490     }
3491 
3492     if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
3493         ath10k_pci_override_ce_config(ar);
3494 
3495     ret = ath10k_pci_alloc_pipes(ar);
3496     if (ret) {
3497         ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
3498                ret);
3499         goto err_free_serv_to_pipe;
3500     }
3501 
3502     return 0;
3503 
3504 err_free_serv_to_pipe:
3505     kfree(ar_pci->serv_to_pipe);
3506 err_free_pipe_config:
3507     kfree(ar_pci->pipe_config);
3508 err_free_attr:
3509     kfree(ar_pci->attr);
3510     return ret;
3511 }
3512 
3513 void ath10k_pci_release_resource(struct ath10k *ar)
3514 {
3515     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
3516 
3517     ath10k_pci_rx_retry_sync(ar);
3518     netif_napi_del(&ar->napi);
3519     ath10k_pci_ce_deinit(ar);
3520     ath10k_pci_free_pipes(ar);
3521     kfree(ar_pci->attr);
3522     kfree(ar_pci->pipe_config);
3523     kfree(ar_pci->serv_to_pipe);
3524 }
3525 
3526 static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
3527     .read32     = ath10k_bus_pci_read32,
3528     .write32    = ath10k_bus_pci_write32,
3529     .get_num_banks  = ath10k_pci_get_num_banks,
3530 };
3531 
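/* Probe flow: pick per-chip reset handlers and the CPU-to-CE address
 * translation from the PCI device id, create the ath10k core, claim PCI
 * resources, set up interrupts, reset the chip, validate the chip id
 * against ath10k_pci_supp_chips[] and finally register the core.
 */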
3532 static int ath10k_pci_probe(struct pci_dev *pdev,
3533                 const struct pci_device_id *pci_dev)
3534 {
3535     int ret = 0;
3536     struct ath10k *ar;
3537     struct ath10k_pci *ar_pci;
3538     enum ath10k_hw_rev hw_rev;
3539     struct ath10k_bus_params bus_params = {};
3540     bool pci_ps, is_qca988x = false;
3541     int (*pci_soft_reset)(struct ath10k *ar);
3542     int (*pci_hard_reset)(struct ath10k *ar);
3543     u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
3544 
3545     switch (pci_dev->device) {
3546     case QCA988X_2_0_DEVICE_ID_UBNT:
3547     case QCA988X_2_0_DEVICE_ID:
3548         hw_rev = ATH10K_HW_QCA988X;
3549         pci_ps = false;
3550         is_qca988x = true;
3551         pci_soft_reset = ath10k_pci_warm_reset;
3552         pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3553         targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3554         break;
3555     case QCA9887_1_0_DEVICE_ID:
3556         hw_rev = ATH10K_HW_QCA9887;
3557         pci_ps = false;
3558         pci_soft_reset = ath10k_pci_warm_reset;
3559         pci_hard_reset = ath10k_pci_qca988x_chip_reset;
3560         targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
3561         break;
3562     case QCA6164_2_1_DEVICE_ID:
3563     case QCA6174_2_1_DEVICE_ID:
3564         hw_rev = ATH10K_HW_QCA6174;
3565         pci_ps = true;
3566         pci_soft_reset = ath10k_pci_warm_reset;
3567         pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3568         targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3569         break;
3570     case QCA99X0_2_0_DEVICE_ID:
3571         hw_rev = ATH10K_HW_QCA99X0;
3572         pci_ps = false;
3573         pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3574         pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3575         targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3576         break;
3577     case QCA9984_1_0_DEVICE_ID:
3578         hw_rev = ATH10K_HW_QCA9984;
3579         pci_ps = false;
3580         pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3581         pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3582         targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3583         break;
3584     case QCA9888_2_0_DEVICE_ID:
3585         hw_rev = ATH10K_HW_QCA9888;
3586         pci_ps = false;
3587         pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
3588         pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
3589         targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
3590         break;
3591     case QCA9377_1_0_DEVICE_ID:
3592         hw_rev = ATH10K_HW_QCA9377;
3593         pci_ps = true;
3594         pci_soft_reset = ath10k_pci_warm_reset;
3595         pci_hard_reset = ath10k_pci_qca6174_chip_reset;
3596         targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
3597         break;
3598     default:
3599         WARN_ON(1);
3600         return -ENOTSUPP;
3601     }
3602 
3603     ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3604                 hw_rev, &ath10k_pci_hif_ops);
3605     if (!ar) {
3606         dev_err(&pdev->dev, "failed to allocate core\n");
3607         return -ENOMEM;
3608     }
3609 
3610     ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3611            pdev->vendor, pdev->device,
3612            pdev->subsystem_vendor, pdev->subsystem_device);
3613 
3614     ar_pci = ath10k_pci_priv(ar);
3615     ar_pci->pdev = pdev;
3616     ar_pci->dev = &pdev->dev;
3617     ar_pci->ar = ar;
3618     ar->dev_id = pci_dev->device;
3619     ar_pci->pci_ps = pci_ps;
3620     ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
3621     ar_pci->pci_soft_reset = pci_soft_reset;
3622     ar_pci->pci_hard_reset = pci_hard_reset;
3623     ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
3624     ar->ce_priv = &ar_pci->ce;
3625 
3626     ar->id.vendor = pdev->vendor;
3627     ar->id.device = pdev->device;
3628     ar->id.subsystem_vendor = pdev->subsystem_vendor;
3629     ar->id.subsystem_device = pdev->subsystem_device;
3630 
3631     timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
3632 
3633     ret = ath10k_pci_setup_resource(ar);
3634     if (ret) {
3635         ath10k_err(ar, "failed to setup resource: %d\n", ret);
3636         goto err_core_destroy;
3637     }
3638 
3639     ret = ath10k_pci_claim(ar);
3640     if (ret) {
3641         ath10k_err(ar, "failed to claim device: %d\n", ret);
3642         goto err_free_pipes;
3643     }
3644 
3645     ret = ath10k_pci_force_wake(ar);
3646     if (ret) {
3647         ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3648         goto err_sleep;
3649     }
3650 
3651     ath10k_pci_ce_deinit(ar);
3652     ath10k_pci_irq_disable(ar);
3653 
3654     ret = ath10k_pci_init_irq(ar);
3655     if (ret) {
3656         ath10k_err(ar, "failed to init irqs: %d\n", ret);
3657         goto err_sleep;
3658     }
3659 
3660     ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3661             ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3662             ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3663 
3664     ret = ath10k_pci_request_irq(ar);
3665     if (ret) {
3666         ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3667         goto err_deinit_irq;
3668     }
3669 
3670     bus_params.dev_type = ATH10K_DEV_TYPE_LL;
3671     bus_params.link_can_suspend = true;
3672     /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
3673      * fall off the bus during chip_reset. These chips share a PCI
3674      * device id with the QCA9880 BR4A and 2R4E, hence the extra check.
3675      */
3676     if (is_qca988x) {
3677         bus_params.chip_id =
3678             ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3679         if (bus_params.chip_id != 0xffffffff) {
3680             if (!ath10k_pci_chip_is_supported(pdev->device,
3681                               bus_params.chip_id)) {
3682                 ret = -ENODEV;
3683                 goto err_unsupported;
3684             }
3685         }
3686     }
3687 
3688     ret = ath10k_pci_chip_reset(ar);
3689     if (ret) {
3690         ath10k_err(ar, "failed to reset chip: %d\n", ret);
3691         goto err_free_irq;
3692     }
3693 
3694     bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3695     if (bus_params.chip_id == 0xffffffff) {
3696         ret = -ENODEV;
3697         goto err_unsupported;
3698     }
3699 
3700     if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
3701         ret = -ENODEV;
3702         goto err_unsupported;
3703     }
3704 
3705     ret = ath10k_core_register(ar, &bus_params);
3706     if (ret) {
3707         ath10k_err(ar, "failed to register driver core: %d\n", ret);
3708         goto err_free_irq;
3709     }
3710 
3711     return 0;
3712 
3713 err_unsupported:
3714     ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3715            pdev->device, bus_params.chip_id);
3716 
3717 err_free_irq:
3718     ath10k_pci_free_irq(ar);
3719 
3720 err_deinit_irq:
3721     ath10k_pci_release_resource(ar);
3722 
3723 err_sleep:
3724     ath10k_pci_sleep_sync(ar);
3725     ath10k_pci_release(ar);
3726 
3727 err_free_pipes:
3728     ath10k_pci_free_pipes(ar);
3729 
3730 err_core_destroy:
3731     ath10k_core_destroy(ar);
3732 
3733     return ret;
3734 }
3735 
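/* Tear down in roughly the reverse order of probe: unregister the core,
 * free and deinit interrupts, release pipe/CE resources, put the device
 * back to sleep and release the PCI resources before destroying the
 * core.
 */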
3736 static void ath10k_pci_remove(struct pci_dev *pdev)
3737 {
3738     struct ath10k *ar = pci_get_drvdata(pdev);
3739 
3740     if (!ar)
3741         return;
3742 
3743     ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3744 
3745     ath10k_core_unregister(ar);
3746     ath10k_pci_free_irq(ar);
3747     ath10k_pci_deinit_irq(ar);
3748     ath10k_pci_release_resource(ar);
3749     ath10k_pci_sleep_sync(ar);
3750     ath10k_pci_release(ar);
3751     ath10k_core_destroy(ar);
3752 }
3753 
3754 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3755 
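/* System PM hooks: forward dev PM suspend/resume to ath10k_pci_suspend()
 * and ath10k_pci_resume(), warning on failure and propagating the error.
 */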
3756 static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3757 {
3758     struct ath10k *ar = dev_get_drvdata(dev);
3759     int ret;
3760 
3761     ret = ath10k_pci_suspend(ar);
3762     if (ret)
3763         ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
3764 
3765     return ret;
3766 }
3767 
3768 static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3769 {
3770     struct ath10k *ar = dev_get_drvdata(dev);
3771     int ret;
3772 
3773     ret = ath10k_pci_resume(ar);
3774     if (ret)
3775         ath10k_warn(ar, "failed to resume hif: %d\n", ret);
3776 
3777     return ret;
3778 }
3779 
3780 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3781              ath10k_pci_pm_suspend,
3782              ath10k_pci_pm_resume);
3783 
3784 static struct pci_driver ath10k_pci_driver = {
3785     .name = "ath10k_pci",
3786     .id_table = ath10k_pci_id_table,
3787     .probe = ath10k_pci_probe,
3788     .remove = ath10k_pci_remove,
3789 #ifdef CONFIG_PM
3790     .driver.pm = &ath10k_pci_pm_ops,
3791 #endif
3792 };
3793 
3794 static int __init ath10k_pci_init(void)
3795 {
3796     int ret;
3797     ret = pci_register_driver(&ath10k_pci_driver);
3798     if (ret) {
3799         printk(KERN_ERR "failed to register ath10k pci driver: %d\n", ret);
3800         return ret;
3801     }
3802     ret = ath10k_ahb_init();
3803     if (ret) {
3804         printk(KERN_ERR "ahb init failed: %d\n", ret);
3805         pci_unregister_driver(&ath10k_pci_driver);
3806     }
3807     return ret;
3808 }
3809 module_init(ath10k_pci_init);
3810 
3811 static void __exit ath10k_pci_exit(void)
3812 {
3813     pci_unregister_driver(&ath10k_pci_driver);
3814     ath10k_ahb_exit();
3815 }
3816 
3817 module_exit(ath10k_pci_exit);
3818 
3819 MODULE_AUTHOR("Qualcomm Atheros");
3820 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3821 MODULE_LICENSE("Dual BSD/GPL");
3822 
3823 /* QCA988x 2.0 firmware files */
3824 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3825 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3826 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3827 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3828 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3829 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3830 
3831 /* QCA9887 1.0 firmware files */
3832 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3833 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
3834 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3835 
3836 /* QCA6174 2.1 firmware files */
3837 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3838 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3839 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3840 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3841 
3842 /* QCA6174 3.1 firmware files */
3843 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3844 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3845 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3846 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3847 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3848 
3849 /* QCA9377 1.0 firmware files */
3850 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
3851 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3852 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);