// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>

#include "pcie_priv.h"
#include "topaz_pcie_regs.h"
#include "topaz_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

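/*
 * Default descriptor ring sizes. Both can be overridden through the
 * tx_bd_num/rx_bd_num arguments of the probe callback; see
 * qtnf_pcie_topaz_init_xfer(), which also caps the TX ring at
 * QTN_PCIE_RC_TX_QUEUE_LEN so that it fits the request[] array in the
 * BDA below.
 */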
#define TOPAZ_TX_BD_SIZE_DEFAULT    128
#define TOPAZ_RX_BD_SIZE_DEFAULT    256

struct qtnf_topaz_tx_bd {
    __le32 addr;
    __le32 info;
} __packed;

struct qtnf_topaz_rx_bd {
    __le32 addr;
    __le32 info;
} __packed;

struct qtnf_extra_bd_params {
    __le32 param1;
    __le32 param2;
    __le32 param3;
    __le32 param4;
} __packed;

#define QTNF_BD_PARAM_OFFSET(n) offsetof(struct qtnf_extra_bd_params, param##n)

struct vmac_pkt_info {
    __le32 addr;
    __le32 info;
};

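/*
 * The BDA is the control block shared with the card through the epmem
 * BAR (qtnf_pcie_topaz_probe() points ts->bda at it). Its layout has
 * to match the card firmware's view of this memory byte for byte,
 * hence __packed, the explicit little-endian fields, and the 4K
 * alignment of the two shared-memory IPC regions.
 */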
struct qtnf_topaz_bda {
    __le16  bda_len;
    __le16  bda_version;
    __le32  bda_bootstate;
    __le32  bda_dma_mask;
    __le32  bda_dma_offset;
    __le32  bda_flags;
    __le32  bda_img;
    __le32  bda_img_size;
    __le32  bda_ep2h_irqstatus;
    __le32  bda_h2ep_irqstatus;
    __le32  bda_msi_addr;
    u8  reserved1[56];
    __le32  bda_flashsz;
    u8  bda_boardname[PCIE_BDA_NAMELEN];
    __le32  bda_pci_pre_status;
    __le32  bda_pci_endian;
    __le32  bda_pci_post_status;
    __le32  bda_h2ep_txd_budget;
    __le32  bda_ep2h_txd_budget;
    __le32  bda_rc_rx_bd_base;
    __le32  bda_rc_rx_bd_num;
    __le32  bda_rc_tx_bd_base;
    __le32  bda_rc_tx_bd_num;
    u8  bda_ep_link_state;
    u8  bda_rc_link_state;
    u8  bda_rc_msi_enabled;
    u8  reserved2;
    __le32  bda_ep_next_pkt;
    struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
    struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
    struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
} __packed;

struct qtnf_pcie_topaz_state {
    struct qtnf_pcie_bus_priv base;
    struct qtnf_topaz_bda __iomem *bda;

    dma_addr_t dma_msi_dummy;
    u32 dma_msi_imwr;

    struct qtnf_topaz_tx_bd *tx_bd_vbase;
    struct qtnf_topaz_rx_bd *rx_bd_vbase;

    __le32 __iomem *ep_next_rx_pkt;
    __le32 __iomem *txqueue_wake;
    __le32 __iomem *ep_pmstate;

    unsigned long rx_pkt_count;
};

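/*
 * Legacy INTx handling: with MSI disabled, the card signals an
 * interrupt by setting TOPAZ_ASSERT_INTX in the CFG0 register of the
 * sysctl BAR, and the host acknowledges by clearing that bit. The
 * clear uses a non-posted write so it is known to have reached the
 * device before the ISR returns.
 */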
static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
    void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
    u32 cfg;

    cfg = readl(reg);
    cfg &= ~TOPAZ_ASSERT_INTX;
    qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
    void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
    u32 cfg = readl(reg);

    return !!(cfg & TOPAZ_ASSERT_INTX);
}

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
           TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
    msleep(QTN_EP_RESET_WAIT_MS);
    pci_restore_state(ts->base.pdev);
}

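/*
 * RX interrupt masking trick: the PCIe DMA engine raises the RX "done"
 * interrupt by performing an inbound memory write (IMWR) to the
 * address programmed into PCIE_DMA_WR_DONE_IMWR_ADDR_LOW. To disable
 * RX interrupts the driver retargets that write at a dummy DMA
 * location (dma_msi_dummy, carved out of the extra BD params block);
 * to re-enable them it restores the original address saved by
 * setup_rx_irqs().
 */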
static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
    void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

    ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
    void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

    qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
    void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

    qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}

static void qtnf_topaz_ipc_gen_ep_int(void *arg)
{
    struct qtnf_pcie_topaz_state *ts = arg;

    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
           TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
    u32 s = readl(reg);

    return (s == state);
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
    qtnf_non_posted_write(state, reg);
}

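/*
 * Wait for a state word in the BDA to reach an expected value. Each
 * iteration sleeps for roughly a millisecond, so delay_in_ms is both
 * the iteration count and an approximate timeout in milliseconds.
 * Returns 0 on success, -1 on timeout.
 */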
static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
    u32 timeout = 0;

    while (!qtnf_is_state(reg, state)) {
        usleep_range(1000, 1200);
        if (++timeout > delay_in_ms)
            return -1;
    }

    return 0;
}

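/*
 * Allocate the descriptor tables as one coherent DMA block laid out as
 * [TX ring][RX ring][extra params] and publish the physical addresses
 * to the card through the BDA. The extra params block carries three
 * host-visible mailboxes (the card's TX consumer index, the TX queue
 * wake flag and the endpoint PM state) plus a scratch word used as a
 * dummy MSI target while RX interrupts are disabled.
 */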
static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
                struct qtnf_topaz_bda __iomem *bda)
{
    struct qtnf_extra_bd_params __iomem *extra_params;
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    dma_addr_t paddr;
    void *vaddr;
    int len;
    int i;

    /* bd table */

    len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
        priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
            sizeof(struct qtnf_extra_bd_params);

    vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
    if (!vaddr)
        return -ENOMEM;

    /* tx bd */

    ts->tx_bd_vbase = vaddr;
    qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);

    for (i = 0; i < priv->tx_bd_num; i++)
        ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);

    pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

    priv->tx_bd_r_index = 0;
    priv->tx_bd_w_index = 0;

    /* rx bd */

    vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
    paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);

    ts->rx_bd_vbase = vaddr;
    qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);

    pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

    /* extra shared params */

    vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
    paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);

    extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;

    ts->ep_next_rx_pkt = &extra_params->param1;
    qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
                  &bda->bda_ep_next_pkt);
    ts->txqueue_wake = &extra_params->param2;
    ts->ep_pmstate = &extra_params->param3;
    ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);

    return 0;
}

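/*
 * Attach a freshly allocated skb to the RX descriptor at @index: the
 * buffer is DMA-mapped for the device to write into and the BD is
 * marked empty so the card treats it as available. @wrap is either 0
 * or QTN_BD_WRAP, preserving the end-of-ring marker when a consumed
 * buffer is replaced.
 */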
static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
    struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
    struct sk_buff *skb;
    dma_addr_t paddr;

    skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
    if (!skb) {
        ts->base.rx_skb[index] = NULL;
        return -ENOMEM;
    }

    ts->base.rx_skb[index] = skb;

    paddr = dma_map_single(&ts->base.pdev->dev, skb->data, SKB_BUF_SIZE,
                   DMA_FROM_DEVICE);
    if (dma_mapping_error(&ts->base.pdev->dev, paddr)) {
        pr_err("skb mapping error: %pad\n", &paddr);
        return -ENOMEM;
    }

    rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
    rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

    ts->base.rx_bd_w_index = index;

    return 0;
}

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
    u16 i;
    int ret = 0;

    memset(ts->rx_bd_vbase, 0x0,
           ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

    for (i = 0; i < ts->base.rx_bd_num; i++) {
        ret = topaz_skb2rbd_attach(ts, i, 0);
        if (ret)
            break;
    }

    ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
                        cpu_to_le32(QTN_BD_WRAP);

    return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
{
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    struct qtnf_topaz_rx_bd *rxbd;
    struct qtnf_topaz_tx_bd *txbd;
    struct sk_buff *skb;
    dma_addr_t paddr;
    int i;

    /* free rx buffers */
    for (i = 0; i < priv->rx_bd_num; i++) {
        if (priv->rx_skb && priv->rx_skb[i]) {
            rxbd = &ts->rx_bd_vbase[i];
            skb = priv->rx_skb[i];
            paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
            dma_unmap_single(&priv->pdev->dev, paddr,
                     SKB_BUF_SIZE, DMA_FROM_DEVICE);
            dev_kfree_skb_any(skb);
            priv->rx_skb[i] = NULL;
            rxbd->addr = 0;
            rxbd->info = 0;
        }
    }

    /* free tx buffers */
    for (i = 0; i < priv->tx_bd_num; i++) {
        if (priv->tx_skb && priv->tx_skb[i]) {
            txbd = &ts->tx_bd_vbase[i];
            skb = priv->tx_skb[i];
            paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
            dma_unmap_single(&priv->pdev->dev, paddr,
                     SKB_BUF_SIZE, DMA_TO_DEVICE);
            dev_kfree_skb_any(skb);
            priv->tx_skb[i] = NULL;
            txbd->addr = 0;
            txbd->info = 0;
        }
    }
}

static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
                     unsigned int tx_bd_size,
                     unsigned int rx_bd_size)
{
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    int ret;

    if (tx_bd_size == 0)
        tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;

    /* check TX BD queue max length according to struct qtnf_topaz_bda */
    if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
        pr_warn("TX BD queue cannot exceed %d\n",
            QTN_PCIE_RC_TX_QUEUE_LEN);
        tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
    }

    priv->tx_bd_num = tx_bd_size;
    qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);

    if (rx_bd_size == 0)
        rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;

    if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
        pr_warn("RX BD queue cannot exceed %d\n",
            TOPAZ_RX_BD_SIZE_DEFAULT);
        rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
    }

    priv->rx_bd_num = rx_bd_size;
    qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);

    priv->rx_bd_w_index = 0;
    priv->rx_bd_r_index = 0;

    ret = qtnf_pcie_alloc_skb_array(priv);
    if (ret) {
        pr_err("failed to allocate skb array\n");
        return ret;
    }

    ret = topaz_alloc_bd_table(ts, bda);
    if (ret) {
        pr_err("failed to allocate bd table\n");
        return ret;
    }

    ret = topaz_alloc_rx_buffers(ts);
    if (ret) {
        pr_err("failed to allocate rx buffers\n");
        return ret;
    }

    return ret;
}

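/*
 * Reclaim completed TX buffers. The card publishes its consumer index
 * through ep_next_rx_pkt, and the ring indices obey the usual
 * linux/circ_buf.h arithmetic, which requires ring sizes to be powers
 * of two (the defaults, 128 and 256, are). For example, with
 * tx_bd_num = 128, tx_bd_r_index = 126 and tx_done_index = 2,
 * CIRC_CNT(2, 126, 128) = (2 - 126) & 127 = 4, so four descriptors
 * are unmapped and their skbs freed.
 */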
static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    struct qtnf_topaz_tx_bd *txbd;
    struct sk_buff *skb;
    unsigned long flags;
    dma_addr_t paddr;
    u32 tx_done_index;
    int count = 0;
    int i;

    spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

    tx_done_index = readl(ts->ep_next_rx_pkt);
    i = priv->tx_bd_r_index;

    if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
               TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

    while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
        skb = priv->tx_skb[i];

        if (likely(skb)) {
            txbd = &ts->tx_bd_vbase[i];
            paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
            dma_unmap_single(&priv->pdev->dev, paddr, skb->len,
                     DMA_TO_DEVICE);

            if (skb->dev) {
                dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
                if (unlikely(priv->tx_stopped)) {
                    qtnf_wake_all_queues(skb->dev);
                    priv->tx_stopped = 0;
                }
            }

            dev_kfree_skb_any(skb);
        }

        priv->tx_skb[i] = NULL;
        count++;

        if (++i >= priv->tx_bd_num)
            i = 0;
    }

    priv->tx_reclaim_done += count;
    priv->tx_reclaim_req++;
    priv->tx_bd_r_index = i;

    spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);

    if (ndev) {
        netif_tx_stop_all_queues(ndev);
        ts->base.tx_stopped = 1;
    }

    writel(0x0, ts->txqueue_wake);

    /* sync up tx queue status before generating interrupt */
    dma_wmb();

    /* send irq to card: tx stopped */
    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
           TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

    /* schedule reclaim attempt */
    tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
    int ready;

    ready = readl(ts->txqueue_wake);
    if (ready) {
        netif_wake_queue(ndev);
    } else {
        /* re-send irq to card: tx stopped */
        writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
               TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
    }
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
{
    struct qtnf_pcie_bus_priv *priv = &ts->base;

    if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
            priv->tx_bd_num)) {
        qtnf_topaz_data_tx_reclaim(ts);

        if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
                priv->tx_bd_num)) {
            priv->tx_full_count++;
            return 0;
        }
    }

    return 1;
}

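/*
 * Transmit path: the skb is DMA-mapped, its address is written both to
 * the host-side TX BD and to the card-visible request[] slot in the
 * BDA, and the card is then kicked with a TOPAZ_RC_TX_DONE_IRQ IPC
 * doorbell. Returns NETDEV_TX_BUSY only when the ring is full even
 * after a reclaim pass; a mapping failure consumes the skb and still
 * returns NETDEV_TX_OK, as the netdev TX contract requires.
 */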
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
                 unsigned int macid, unsigned int vifid)
{
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    struct qtnf_topaz_tx_bd *txbd;
    dma_addr_t skb_paddr;
    unsigned long flags;
    int ret = 0;
    int len;
    int i;

    spin_lock_irqsave(&priv->tx_lock, flags);

    if (!qtnf_tx_queue_ready(ts)) {
        qtnf_try_stop_xmit(bus, skb->dev);
        spin_unlock_irqrestore(&priv->tx_lock, flags);
        return NETDEV_TX_BUSY;
    }

    i = priv->tx_bd_w_index;
    priv->tx_skb[i] = skb;
    len = skb->len;

    skb_paddr = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
                   DMA_TO_DEVICE);
    if (dma_mapping_error(&priv->pdev->dev, skb_paddr)) {
        ret = -ENOMEM;
        goto tx_done;
    }

    txbd = &ts->tx_bd_vbase[i];
    txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));

    writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
    writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);

    /* sync up descriptor updates before generating interrupt */
    dma_wmb();

    /* generate irq to card: tx done */
    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
           TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

    if (++i >= priv->tx_bd_num)
        i = 0;

    priv->tx_bd_w_index = i;

tx_done:
    if (ret) {
        if (skb->dev)
            skb->dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
    }

    priv->tx_done_count++;
    spin_unlock_irqrestore(&priv->tx_lock, flags);

    qtnf_topaz_data_tx_reclaim(ts);

    return NETDEV_TX_OK;
}

static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
    struct qtnf_bus *bus = (struct qtnf_bus *)data;
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
    struct qtnf_pcie_bus_priv *priv = &ts->base;

    if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
        return IRQ_NONE;

    if (!priv->msi_enabled)
        qtnf_deassert_intx(ts);

    priv->pcie_irq_count++;

    qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
    qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

    if (napi_schedule_prep(&bus->mux_napi)) {
        disable_rx_irqs(ts);
        __napi_schedule(&bus->mux_napi);
    }

    tasklet_hi_schedule(&priv->reclaim_tq);

    return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
    u16 index = ts->base.rx_bd_r_index;
    struct qtnf_topaz_rx_bd *rxbd;
    u32 descw;

    rxbd = &ts->rx_bd_vbase[index];
    descw = le32_to_cpu(rxbd->info);

    if (descw & QTN_BD_EMPTY)
        return 0;

    return 1;
}

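/*
 * NAPI poll: consume up to @budget completed RX descriptors, hand each
 * packet to the right virtual interface via qtnf_classify_skb(), and
 * refill the ring with fresh buffers. RX interrupts stay disabled for
 * the whole poll and are re-enabled only once the budget was not
 * exhausted and napi_complete() has been called.
 */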
static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
{
    struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    struct net_device *ndev = NULL;
    struct sk_buff *skb = NULL;
    int processed = 0;
    struct qtnf_topaz_rx_bd *rxbd;
    dma_addr_t skb_paddr;
    int consume;
    u32 descw;
    u32 poffset;
    u32 psize;
    u16 r_idx;
    u16 w_idx;
    int ret;

    while (processed < budget) {
        if (!qtnf_rx_data_ready(ts))
            goto rx_out;

        r_idx = priv->rx_bd_r_index;
        rxbd = &ts->rx_bd_vbase[r_idx];
        descw = le32_to_cpu(rxbd->info);

        skb = priv->rx_skb[r_idx];
        poffset = QTN_GET_OFFSET(descw);
        psize = QTN_GET_LEN(descw);
        consume = 1;

        if (descw & QTN_BD_EMPTY) {
            pr_warn("skip invalid rxbd[%d]\n", r_idx);
            consume = 0;
        }

        if (!skb) {
            pr_warn("skip missing rx_skb[%d]\n", r_idx);
            consume = 0;
        }

        if (skb && (skb_tailroom(skb) < psize)) {
            pr_err("skip packet with invalid length: %u > %u\n",
                   psize, skb_tailroom(skb));
            consume = 0;
        }

        if (skb) {
            skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
            dma_unmap_single(&priv->pdev->dev, skb_paddr,
                     SKB_BUF_SIZE, DMA_FROM_DEVICE);
        }

        if (consume) {
            skb_reserve(skb, poffset);
            skb_put(skb, psize);
            ndev = qtnf_classify_skb(bus, skb);
            if (likely(ndev)) {
                dev_sw_netstats_rx_add(ndev, skb->len);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
            } else {
                pr_debug("drop untagged skb\n");
                bus->mux_dev.stats.rx_dropped++;
                dev_kfree_skb_any(skb);
            }
        } else {
            if (skb) {
                bus->mux_dev.stats.rx_dropped++;
                dev_kfree_skb_any(skb);
            }
        }

        /* notify the card about received packets once every several packets */
        if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
            writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
                   TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

        priv->rx_skb[r_idx] = NULL;
        if (++r_idx >= priv->rx_bd_num)
            r_idx = 0;

        priv->rx_bd_r_index = r_idx;

        /* replace the processed buffer with a new one */
        w_idx = priv->rx_bd_w_index;
        while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                  priv->rx_bd_num) > 0) {
            if (++w_idx >= priv->rx_bd_num)
                w_idx = 0;

            ret = topaz_skb2rbd_attach(ts, w_idx,
                           descw & QTN_BD_WRAP);
            if (ret) {
                pr_err("failed to allocate new rx_skb[%d]\n",
                       w_idx);
                break;
            }
        }

        processed++;
    }

rx_out:
    if (processed < budget) {
        napi_complete(napi);
        enable_rx_irqs(ts);
    }

    return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

    qtnf_try_wake_xmit(bus, ndev);
    tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

    napi_enable(&bus->mux_napi);
    enable_rx_irqs(ts);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

    disable_rx_irqs(ts);
    napi_disable(&bus->mux_napi);
}

static struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
    /* control path methods */
    .control_tx = qtnf_pcie_control_tx,

    /* data path methods */
    .data_tx        = qtnf_pcie_data_tx,
    .data_tx_timeout    = qtnf_pcie_data_tx_timeout,
    .data_rx_start      = qtnf_pcie_data_rx_start,
    .data_rx_stop       = qtnf_pcie_data_rx_stop,
};

static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
    struct qtnf_bus *bus = dev_get_drvdata(s->private);
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

    seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);

    return 0;
}

static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
{
    struct qtnf_bus *bus = dev_get_drvdata(s->private);
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
    struct qtnf_pcie_bus_priv *priv = &ts->base;
    u32 tx_done_index = readl(ts->ep_next_rx_pkt);

    seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
    seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
    seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
    seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

    seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
    seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
    seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);

    seq_printf(s, "tx host queue len(%u)\n",
           CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
                priv->tx_bd_num));
    seq_printf(s, "tx reclaim queue len(%u)\n",
           CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
                priv->tx_bd_num));
    seq_printf(s, "tx card queue len(%u)\n",
           CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
                priv->tx_bd_num));

    seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
    seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
    seq_printf(s, "rx alloc queue len(%u)\n",
           CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
                  priv->rx_bd_num));

    return 0;
}

static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
{
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    u32 offset = readl(&bda->bda_dma_offset);

    if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
        return;

    writel(0x0, &bda->bda_dma_offset);
}

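/*
 * Endianness handshake: the host writes a known pattern into
 * bda_pci_endian, flags it valid via bda_pci_pre_status, and waits for
 * the card to acknowledge through bda_pci_post_status. The pattern
 * read back afterwards reveals how the card sees host-written shared
 * memory; anything other than QTN_PCI_LITTLE_ENDIAN is unexpected here
 * and only triggers a WARN.
 */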
static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
{
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    u32 timeout = 0;
    u32 endian;
    int ret = 0;

    writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);

    /* flush endian modifications before status update */
    dma_wmb();

    writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);

    while (readl(&bda->bda_pci_post_status) !=
           QTN_PCI_ENDIAN_VALID_STATUS) {
        usleep_range(1000, 1200);
        if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
            pr_err("card endianness detection timed out\n");
            ret = -ETIMEDOUT;
            goto endian_out;
        }
    }

    /* do not read before status is updated */
    dma_rmb();

    endian = readl(&bda->bda_pci_endian);
    WARN(endian != QTN_PCI_LITTLE_ENDIAN,
         "%s: unexpected card endianness", __func__);

endian_out:
    writel(0, &bda->bda_pci_pre_status);
    writel(0, &bda->bda_pci_post_status);
    writel(0, &bda->bda_pci_endian);

    return ret;
}

static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    u32 flags;
    int ret;

    ret = qtnf_pcie_endian_detect(ts);
    if (ret < 0) {
        pr_err("failed to detect card endianness\n");
        return ret;
    }

    writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
    qtnf_reset_dma_offset(ts);

    /* notify card about driver type and boot mode */
    flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;

    if (ts->base.flashboot)
        flags |= QTN_BDA_FLASH_BOOT;
    else
        flags &= ~QTN_BDA_FLASH_BOOT;

    writel(flags, &bda->bda_flags);

    qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
    if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
                QTN_FW_DL_TIMEOUT_MS)) {
        pr_err("card is not ready to boot...\n");
        return -ETIMEDOUT;
    }

    return ret;
}

static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
{
    struct pci_dev *pdev = ts->base.pdev;

    setup_rx_irqs(ts);
    disable_rx_irqs(ts);

    if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
                QTN_FW_QLINK_TIMEOUT_MS))
        return -ETIMEDOUT;

    enable_irq(pdev->irq);
    return 0;
}

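/*
 * Chunked firmware download. The image is copied through a single
 * coherent DMA bounce buffer (blksize bytes, halved from fw_blksize
 * down to at least PAGE_SIZE until an allocation succeeds). For every
 * chunk the host publishes the buffer address and size in the BDA,
 * advances bda_bootstate to QTN_BDA_FW_BLOCK_RDY and waits for the
 * card to acknowledge with QTN_BDA_FW_BLOCK_DONE. A zero-sized block
 * marks the end of the upload.
 */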
static int
qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
{
    struct qtnf_topaz_bda __iomem *bda = ts->bda;
    struct pci_dev *pdev = ts->base.pdev;
    u32 remaining = fw_size;
    u8 *curr = (u8 *)fw;
    u32 blksize;
    u32 nblocks;
    u32 offset;
    u32 count;
    u32 size;
    dma_addr_t paddr;
    void *data;
    int ret = 0;

    pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size);

    blksize = ts->base.fw_blksize;

    if (blksize < PAGE_SIZE)
        blksize = PAGE_SIZE;

    while (blksize >= PAGE_SIZE) {
        pr_debug("allocating %u bytes to upload FW\n", blksize);
        data = dma_alloc_coherent(&pdev->dev, blksize,
                      &paddr, GFP_KERNEL);
        if (data)
            break;
        blksize /= 2;
    }

    if (!data) {
        pr_err("failed to allocate DMA buffer for FW upload\n");
        ret = -ENOMEM;
        goto fw_load_out;
    }

    nblocks = NBLOCKS(fw_size, blksize);
    offset = readl(&bda->bda_dma_offset);

    qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
    if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
                QTN_FW_DL_TIMEOUT_MS)) {
        pr_err("card is not ready to download FW\n");
        ret = -ETIMEDOUT;
        goto fw_load_map;
    }

    for (count = 0; count < nblocks; count++) {
        size = (remaining > blksize) ? blksize : remaining;

        memcpy(data, curr, size);
        qtnf_non_posted_write(paddr + offset, &bda->bda_img);
        qtnf_non_posted_write(size, &bda->bda_img_size);

        pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
             count, (void *)curr, &paddr, size);

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
        if (qtnf_poll_state(&ts->bda->bda_bootstate,
                    QTN_BDA_FW_BLOCK_DONE,
                    QTN_FW_DL_TIMEOUT_MS)) {
            pr_err("confirmation for block #%d timed out\n", count);
            ret = -ETIMEDOUT;
            goto fw_load_map;
        }

        remaining = (remaining < size) ? remaining : (remaining - size);
        curr += size;
    }

    /* upload completion mark: zero-sized block */
    qtnf_non_posted_write(0, &bda->bda_img);
    qtnf_non_posted_write(0, &bda->bda_img_size);

    qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
    if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
                QTN_FW_DL_TIMEOUT_MS)) {
        pr_err("confirmation for the last block timed out\n");
        ret = -ETIMEDOUT;
        goto fw_load_map;
    }

    /* RC is done */
    qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
    if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
                QTN_FW_DL_TIMEOUT_MS)) {
        pr_err("confirmation for FW upload completion timed out\n");
        ret = -ETIMEDOUT;
        goto fw_load_map;
    }

    pr_debug("FW upload completed: sent %d blocks in total\n", count);

fw_load_map:
    dma_free_coherent(&pdev->dev, blksize, data, paddr);

fw_load_out:
    return ret;
}

static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
                const char *fwname)
{
    const struct firmware *fw;
    struct pci_dev *pdev = ts->base.pdev;
    int ret;

    if (qtnf_poll_state(&ts->bda->bda_bootstate,
                QTN_BDA_FW_LOAD_RDY,
                QTN_FW_DL_TIMEOUT_MS)) {
        pr_err("%s: card is not ready\n", fwname);
        return -1;
    }

    pr_info("starting firmware upload: %s\n", fwname);

    ret = request_firmware(&fw, fwname, &pdev->dev);
    if (ret < 0) {
        pr_err("%s: request_firmware error %d\n", fwname, ret);
        return -1;
    }

    ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
    release_firmware(fw);

    if (ret)
        pr_err("%s: FW upload error\n", fwname);

    return ret;
}

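/*
 * Deferred boot sequence, run from the bus fw_work workqueue. If the
 * card requests it through QTN_BDA_XMIT_UBOOT, a bootloader image is
 * uploaded first and the endpoint is re-initialized. The main firmware
 * then either boots from flash (flashboot) or is uploaded from the
 * host, after which the handler walks the bootstate machine through
 * QTN_BDA_FW_START/QTN_BDA_FW_CONFIG/QTN_BDA_FW_RUN until the card
 * reports QTN_BDA_FW_RUNNING.
 */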
static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
    struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
    struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
    int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
    struct pci_dev *pdev = ts->base.pdev;
    int ret;

    qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

    if (bootloader_needed) {
        ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
        if (ret)
            goto fw_load_exit;

        ret = qtnf_pre_init_ep(bus);
        if (ret)
            goto fw_load_exit;

        qtnf_set_state(&ts->bda->bda_bootstate,
                   QTN_BDA_FW_TARGET_BOOT);
    }

    if (ts->base.flashboot) {
        pr_info("booting firmware from flash\n");

        ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                      QTN_BDA_FW_FLASH_BOOT,
                      QTN_FW_DL_TIMEOUT_MS);
        if (ret)
            goto fw_load_exit;
    } else {
        ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
        if (ret)
            goto fw_load_exit;

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
        ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                      QTN_BDA_FW_CONFIG,
                      QTN_FW_QLINK_TIMEOUT_MS);
        if (ret) {
            pr_err("FW bringup timed out\n");
            goto fw_load_exit;
        }

        qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
        ret = qtnf_poll_state(&ts->bda->bda_bootstate,
                      QTN_BDA_FW_RUNNING,
                      QTN_FW_QLINK_TIMEOUT_MS);
        if (ret) {
            pr_err("card bringup timed out\n");
            goto fw_load_exit;
        }
    }

    ret = qtnf_post_init_ep(ts);
    if (ret) {
        pr_err("FW runtime failure\n");
        goto fw_load_exit;
    }

    pr_info("firmware is up and running\n");

    ret = qtnf_pcie_fw_boot_done(bus);
    if (ret)
        goto fw_load_exit;

    qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
    qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
    put_device(&pdev->dev);
}

static void qtnf_reclaim_tasklet_fn(struct tasklet_struct *t)
{
    struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);

    qtnf_topaz_data_tx_reclaim(ts);
}

static u64 qtnf_topaz_dma_mask_get(void)
{
    return DMA_BIT_MASK(32);
}

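/*
 * Bus probe callback. The order matters: the IRQ line is requested but
 * immediately disabled so that the endpoint can be pre-initialized
 * (endianness handshake, boot flags) without interrupts, then the
 * descriptor rings, reclaim tasklet, NAPI context and shared-memory
 * IPC are set up. The IRQ is re-enabled later in qtnf_post_init_ep()
 * once the firmware reports QTN_BDA_FW_QLINK_DONE.
 */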
static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
                 unsigned int tx_bd_num, unsigned int rx_bd_num)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
    struct pci_dev *pdev = ts->base.pdev;
    struct qtnf_shm_ipc_int ipc_int;
    unsigned long irqflags;
    int ret;

    bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
    INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
    ts->bda = ts->base.epmem_bar;

    /* assign host msi irq before card init */
    if (ts->base.msi_enabled)
        irqflags = IRQF_NOBALANCING;
    else
        irqflags = IRQF_NOBALANCING | IRQF_SHARED;

    ret = devm_request_irq(&pdev->dev, pdev->irq,
                   &qtnf_pcie_topaz_interrupt,
                   irqflags, "qtnf_topaz_irq", (void *)bus);
    if (ret) {
        pr_err("failed to request pcie irq %d\n", pdev->irq);
        return ret;
    }

    disable_irq(pdev->irq);

    ret = qtnf_pre_init_ep(bus);
    if (ret) {
        pr_err("failed to init card\n");
        return ret;
    }

    ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
    if (ret) {
        pr_err("PCIE xfer init failed\n");
        return ret;
    }

    tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
    netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
                  qtnf_topaz_rx_poll, 10);

    ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
    ipc_int.arg = ts;
    qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
                   &ts->bda->bda_shm_reg2, &ipc_int);

    return 0;
}

static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

    qtnf_topaz_reset_ep(ts);
    qtnf_topaz_free_xfer_buffers(ts);
}

#ifdef CONFIG_PM_SLEEP
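/*
 * System sleep support: the endpoint is told about host PM transitions
 * by writing the target PCI power state into the shared ep_pmstate
 * word and ringing the TOPAZ_RC_PM_EP_IRQ doorbell; dma_wmb() orders
 * the state update ahead of the interrupt as seen by the card.
 */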
static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
    struct pci_dev *pdev = ts->base.pdev;

    writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
    dma_wmb();
    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
           TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

    pci_save_state(pdev);
    pci_enable_wake(pdev, PCI_D3hot, 1);
    pci_set_power_state(pdev, PCI_D3hot);

    return 0;
}

static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
{
    struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
    struct pci_dev *pdev = ts->base.pdev;

    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    pci_enable_wake(pdev, PCI_D0, 0);

    writel((u32 __force)PCI_D0, ts->ep_pmstate);
    dma_wmb();
    writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
           TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

    return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
    struct qtnf_bus *bus;
    struct qtnf_pcie_topaz_state *ts;

    bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
    if (!bus)
        return NULL;

    ts = get_bus_priv(bus);
    ts->base.probe_cb = qtnf_pcie_topaz_probe;
    ts->base.remove_cb = qtnf_pcie_topaz_remove;
    ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
    ts->base.resume_cb = qtnf_pcie_topaz_resume;
    ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
#endif

    return bus;
}