// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe Endpoint controller driver
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
 *
 * Copyright (c) 2021, Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/module.h>

0023 #include "pcie-designware.h"
0024 
0025 /* PARF registers */
0026 #define PARF_SYS_CTRL               0x00
0027 #define PARF_DB_CTRL                0x10
0028 #define PARF_PM_CTRL                0x20
0029 #define PARF_MHI_BASE_ADDR_LOWER        0x178
0030 #define PARF_MHI_BASE_ADDR_UPPER        0x17c
0031 #define PARF_DEBUG_INT_EN           0x190
0032 #define PARF_AXI_MSTR_RD_HALT_NO_WRITES     0x1a4
0033 #define PARF_AXI_MSTR_WR_ADDR_HALT      0x1a8
0034 #define PARF_Q2A_FLUSH              0x1ac
0035 #define PARF_LTSSM              0x1b0
0036 #define PARF_CFG_BITS               0x210
0037 #define PARF_INT_ALL_STATUS         0x224
0038 #define PARF_INT_ALL_CLEAR          0x228
0039 #define PARF_INT_ALL_MASK           0x22c
0040 #define PARF_SLV_ADDR_MSB_CTRL          0x2c0
0041 #define PARF_DBI_BASE_ADDR          0x350
0042 #define PARF_DBI_BASE_ADDR_HI           0x354
0043 #define PARF_SLV_ADDR_SPACE_SIZE        0x358
0044 #define PARF_SLV_ADDR_SPACE_SIZE_HI     0x35c
0045 #define PARF_ATU_BASE_ADDR          0x634
0046 #define PARF_ATU_BASE_ADDR_HI           0x638
0047 #define PARF_SRIS_MODE              0x644
0048 #define PARF_DEVICE_TYPE            0x1000
0049 #define PARF_BDF_TO_SID_CFG         0x2c00
0050 
0051 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
0052 #define PARF_INT_ALL_LINK_DOWN          BIT(1)
0053 #define PARF_INT_ALL_BME            BIT(2)
0054 #define PARF_INT_ALL_PM_TURNOFF         BIT(3)
0055 #define PARF_INT_ALL_DEBUG          BIT(4)
0056 #define PARF_INT_ALL_LTR            BIT(5)
0057 #define PARF_INT_ALL_MHI_Q6         BIT(6)
0058 #define PARF_INT_ALL_MHI_A7         BIT(7)
0059 #define PARF_INT_ALL_DSTATE_CHANGE      BIT(8)
0060 #define PARF_INT_ALL_L1SUB_TIMEOUT      BIT(9)
0061 #define PARF_INT_ALL_MMIO_WRITE         BIT(10)
0062 #define PARF_INT_ALL_CFG_WRITE          BIT(11)
0063 #define PARF_INT_ALL_BRIDGE_FLUSH_N     BIT(12)
0064 #define PARF_INT_ALL_LINK_UP            BIT(13)
0065 #define PARF_INT_ALL_AER_LEGACY         BIT(14)
0066 #define PARF_INT_ALL_PLS_ERR            BIT(15)
0067 #define PARF_INT_ALL_PME_LEGACY         BIT(16)
0068 #define PARF_INT_ALL_PLS_PME            BIT(17)
0069 
0070 /* PARF_BDF_TO_SID_CFG register fields */
0071 #define PARF_BDF_TO_SID_BYPASS          BIT(0)
0072 
0073 /* PARF_DEBUG_INT_EN register fields */
0074 #define PARF_DEBUG_INT_PM_DSTATE_CHANGE     BIT(1)
0075 #define PARF_DEBUG_INT_CFG_BUS_MASTER_EN    BIT(2)
0076 #define PARF_DEBUG_INT_RADM_PM_TURNOFF      BIT(3)
0077 
0078 /* PARF_DEVICE_TYPE register fields */
0079 #define PARF_DEVICE_TYPE_EP         0x0
0080 
0081 /* PARF_PM_CTRL register fields */
0082 #define PARF_PM_CTRL_REQ_EXIT_L1        BIT(1)
0083 #define PARF_PM_CTRL_READY_ENTR_L23     BIT(2)
0084 #define PARF_PM_CTRL_REQ_NOT_ENTR_L1        BIT(5)
0085 
0086 /* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
0087 #define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN   BIT(0)
0088 
0089 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
0090 #define PARF_AXI_MSTR_WR_ADDR_HALT_EN       BIT(31)
0091 
0092 /* PARF_Q2A_FLUSH register fields */
0093 #define PARF_Q2A_FLUSH_EN           BIT(16)
0094 
0095 /* PARF_SYS_CTRL register fields */
0096 #define PARF_SYS_CTRL_AUX_PWR_DET       BIT(4)
0097 #define PARF_SYS_CTRL_CORE_CLK_CGC_DIS      BIT(6)
0098 #define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE  BIT(11)
0099 
0100 /* PARF_DB_CTRL register fields */
0101 #define PARF_DB_CTRL_INSR_DBNCR_BLOCK       BIT(0)
0102 #define PARF_DB_CTRL_RMVL_DBNCR_BLOCK       BIT(1)
0103 #define PARF_DB_CTRL_DBI_WKP_BLOCK      BIT(4)
0104 #define PARF_DB_CTRL_SLV_WKP_BLOCK      BIT(5)
0105 #define PARF_DB_CTRL_MST_WKP_BLOCK      BIT(6)
0106 
0107 /* PARF_CFG_BITS register fields */
0108 #define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN  BIT(1)
0109 
0110 /* ELBI registers */
0111 #define ELBI_SYS_STTS               0x08
0112 
0113 /* DBI registers */
0114 #define DBI_CON_STATUS              0x44
0115 
0116 /* DBI register fields */
0117 #define DBI_CON_STATUS_POWER_STATE_MASK     GENMASK(1, 0)
0118 
0119 #define XMLH_LINK_UP                0x400
0120 #define CORE_RESET_TIME_US_MIN          1000
0121 #define CORE_RESET_TIME_US_MAX          1005
0122 #define WAKE_DELAY_US               2000 /* 2 ms */
0123 
0124 #define to_pcie_ep(x)               dev_get_drvdata((x)->dev)
0125 
0126 enum qcom_pcie_ep_link_status {
0127     QCOM_PCIE_EP_LINK_DISABLED,
0128     QCOM_PCIE_EP_LINK_ENABLED,
0129     QCOM_PCIE_EP_LINK_UP,
0130     QCOM_PCIE_EP_LINK_DOWN,
0131 };
0132 
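/* Clocks required by the endpoint controller, handled as one bulk set */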
static struct clk_bulk_data qcom_pcie_ep_clks[] = {
    { .id = "cfg" },
    { .id = "aux" },
    { .id = "bus_master" },
    { .id = "bus_slave" },
    { .id = "ref" },
    { .id = "sleep" },
    { .id = "slave_q2a" },
};

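/**
 * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
 * @pci: DesignWare PCIe controller struct
 * @parf: Qualcomm PCIe specific PARF register base
 * @elbi: DesignWare PCIe specific ELBI register base
 * @perst_map: TCSR syscon regmap used for PERST configuration
 * @mmio_res: MMIO region resource exposed to the host as a BAR
 * @core_reset: PCIe Endpoint core reset
 * @reset: PERST# GPIO (driven by the host)
 * @wake: WAKE# GPIO (driven towards the host)
 * @phy: PHY attached to the endpoint controller
 * @perst_en: TCSR offset of the PERST enable register
 * @perst_sep_en: TCSR offset of the PERST separation enable register
 * @link_status: PCIe link status
 * @global_irq: Qualcomm PCIe specific global IRQ
 * @perst_irq: IRQ tracking PERST# state changes
 */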
struct qcom_pcie_ep {
    struct dw_pcie pci;

    void __iomem *parf;
    void __iomem *elbi;
    struct regmap *perst_map;
    struct resource *mmio_res;

    struct reset_control *core_reset;
    struct gpio_desc *reset;
    struct gpio_desc *wake;
    struct phy *phy;

    u32 perst_en;
    u32 perst_sep_en;

    enum qcom_pcie_ep_link_status link_status;
    int global_irq;
    int perst_irq;
};

static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
{
    struct dw_pcie *pci = &pcie_ep->pci;
    struct device *dev = pci->dev;
    int ret;

    ret = reset_control_assert(pcie_ep->core_reset);
    if (ret) {
        dev_err(dev, "Cannot assert core reset\n");
        return ret;
    }

    usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

    ret = reset_control_deassert(pcie_ep->core_reset);
    if (ret) {
        dev_err(dev, "Cannot de-assert core reset\n");
        return ret;
    }

    usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);

    return 0;
}

/*
 * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
 * device reset during host reboot and hibernation. The driver is
 * expected to handle this situation.
 */
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
    regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
    regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
}

static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
    struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
    u32 reg;

    reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);

    return reg & XMLH_LINK_UP;
}

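/*
 * Link training itself is kicked off from qcom_pcie_perst_deassert() once
 * the host releases PERST#, so start_link()/stop_link() only arm and
 * disarm the PERST# interrupt.
 */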
static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
{
    struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

    enable_irq(pcie_ep->perst_irq);

    return 0;
}

static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
{
    struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);

    disable_irq(pcie_ep->perst_irq);
}

static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
    int ret;

    ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
                      qcom_pcie_ep_clks);
    if (ret)
        return ret;

    ret = qcom_pcie_ep_core_reset(pcie_ep);
    if (ret)
        goto err_disable_clk;

    ret = phy_init(pcie_ep->phy);
    if (ret)
        goto err_disable_clk;

    ret = phy_power_on(pcie_ep->phy);
    if (ret)
        goto err_phy_exit;

    return 0;

err_phy_exit:
    phy_exit(pcie_ep->phy);
err_disable_clk:
    clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
                   qcom_pcie_ep_clks);

    return ret;
}

static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
    phy_power_off(pcie_ep->phy);
    phy_exit(pcie_ep->phy);
    clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
                   qcom_pcie_ep_clks);
}

static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
{
    struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
    struct device *dev = pci->dev;
    u32 val, offset;
    int ret;

    ret = qcom_pcie_enable_resources(pcie_ep);
    if (ret) {
        dev_err(dev, "Failed to enable resources: %d\n", ret);
        return ret;
    }

    /* Assert WAKE# to RC to indicate device is ready */
    gpiod_set_value_cansleep(pcie_ep->wake, 1);
    usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
    gpiod_set_value_cansleep(pcie_ep->wake, 0);

    qcom_pcie_ep_configure_tcsr(pcie_ep);

    /* Disable BDF to SID mapping */
    val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
    val |= PARF_BDF_TO_SID_BYPASS;
    writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);

    /* Enable debug IRQ */
    val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
    val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
           PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
           PARF_DEBUG_INT_PM_DSTATE_CHANGE;
    writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);

    /* Configure PCIe to endpoint mode */
    writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);

    /* Allow entering L1 state */
    val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
    val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
    writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);

    /* Read halts write */
    val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
    val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
    writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);

    /* Write after write halt */
    val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
    val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
    writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

    /* Q2A flush disable */
    val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
    val &= ~PARF_Q2A_FLUSH_EN;
    writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);

    /* Disable DBI Wakeup, core clock CGC and enable AUX power */
    val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
    val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
           PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
           PARF_SYS_CTRL_AUX_PWR_DET;
    writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);

    /* Disable the debouncers */
    val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
    val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
           PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
           PARF_DB_CTRL_MST_WKP_BLOCK;
    writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);

    /* Request to exit from L1SS for MSI and LTR MSG */
    val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
    val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
    writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);

    dw_pcie_dbi_ro_wr_en(pci);

    /* Set the L0s Exit Latency to 2us-4us = 0x6 */
    offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
    val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
    val &= ~PCI_EXP_LNKCAP_L0SEL;
    val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
    dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

    /* Set the L1 Exit Latency to be 32us-64us = 0x6 */
    offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
    val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
    val &= ~PCI_EXP_LNKCAP_L1EL;
    val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
    dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);

    dw_pcie_dbi_ro_wr_dis(pci);

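    /*
     * Clear PARF_INT_ALL_MASK, then select only the events that the
     * global IRQ handler acts upon.
     */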
    writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
    val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
          PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
          PARF_INT_ALL_LINK_UP;
    writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);

    ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
    if (ret) {
        dev_err(dev, "Failed to complete initialization: %d\n", ret);
        goto err_disable_resources;
    }

    /*
     * The physical address of the MMIO region which is exposed as the BAR
     * should be written to MHI BASE registers.
     */
    writel_relaxed(pcie_ep->mmio_res->start,
               pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
    writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);

    dw_pcie_ep_init_notify(&pcie_ep->pci.ep);

    /* Enable LTSSM (BIT(8) of PARF_LTSSM is the LTSSM_EN field) */
    val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
    val |= BIT(8);
    writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);

    return 0;

err_disable_resources:
    qcom_pcie_disable_resources(pcie_ep);

    return ret;
}

static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
    struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
    struct device *dev = pci->dev;

    if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
        dev_dbg(dev, "Link is already disabled\n");
        return;
    }

    qcom_pcie_disable_resources(pcie_ep);
    pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}

/* Common DWC controller ops */
static const struct dw_pcie_ops pci_ops = {
    .link_up = qcom_pcie_dw_link_up,
    .start_link = qcom_pcie_dw_start_link,
    .stop_link = qcom_pcie_dw_stop_link,
};

static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
                     struct qcom_pcie_ep *pcie_ep)
{
    struct device *dev = &pdev->dev;
    struct dw_pcie *pci = &pcie_ep->pci;
    struct device_node *syscon;
    struct resource *res;
    int ret;

    pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
    if (IS_ERR(pcie_ep->parf))
        return PTR_ERR(pcie_ep->parf);

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
    pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(pci->dbi_base))
        return PTR_ERR(pci->dbi_base);
    pci->dbi_base2 = pci->dbi_base;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
    pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(pcie_ep->elbi))
        return PTR_ERR(pcie_ep->elbi);

    pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                             "mmio");
    if (!pcie_ep->mmio_res) {
        dev_err(dev, "Failed to get mmio resource\n");
        return -EINVAL;
    }

    syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
    if (!syscon) {
        dev_err(dev, "Failed to parse qcom,perst-regs\n");
        return -EINVAL;
    }

    pcie_ep->perst_map = syscon_node_to_regmap(syscon);
    of_node_put(syscon);
    if (IS_ERR(pcie_ep->perst_map))
        return PTR_ERR(pcie_ep->perst_map);

    ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
                     1, &pcie_ep->perst_en);
    if (ret < 0) {
        dev_err(dev, "No Perst Enable offset in syscon\n");
        return ret;
    }

    ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
                     2, &pcie_ep->perst_sep_en);
    if (ret < 0) {
        dev_err(dev, "No Perst Separation Enable offset in syscon\n");
        return ret;
    }

    return 0;
}

static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
                      struct qcom_pcie_ep *pcie_ep)
{
    struct device *dev = &pdev->dev;
    int ret;

    ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
    if (ret) {
        dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
        return ret;
    }

    ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
                qcom_pcie_ep_clks);
    if (ret)
        return ret;

    pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
    if (IS_ERR(pcie_ep->core_reset))
        return PTR_ERR(pcie_ep->core_reset);

    pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
    if (IS_ERR(pcie_ep->reset))
        return PTR_ERR(pcie_ep->reset);

    pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
    if (IS_ERR(pcie_ep->wake))
        return PTR_ERR(pcie_ep->wake);

    pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
    if (IS_ERR(pcie_ep->phy))
        ret = PTR_ERR(pcie_ep->phy);

    return ret;
}

/* TODO: Notify clients about PCIe state change */
static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
{
    struct qcom_pcie_ep *pcie_ep = data;
    struct dw_pcie *pci = &pcie_ep->pci;
    struct device *dev = pci->dev;
    u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
    u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
    u32 dstate, val;

    writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
    status &= mask;

    if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
        dev_dbg(dev, "Received Linkdown event\n");
        pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
    } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
        dev_dbg(dev, "Received BME event. Link is enabled!\n");
        pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
    } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
        dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
        val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
        val |= PARF_PM_CTRL_READY_ENTR_L23;
        writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
    } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
        dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
                       DBI_CON_STATUS_POWER_STATE_MASK;
        dev_dbg(dev, "Received D%d state event\n", dstate);
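        /* D3hot entry: request exit from L1 */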
        if (dstate == 3) {
            val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
            val |= PARF_PM_CTRL_REQ_EXIT_L1;
            writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
        }
    } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
        dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
        dw_pcie_ep_linkup(&pci->ep);
        pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
    } else {
        dev_dbg(dev, "Received unknown event: %d\n", status);
    }

    return IRQ_HANDLED;
}

static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
{
    struct qcom_pcie_ep *pcie_ep = data;
    struct dw_pcie *pci = &pcie_ep->pci;
    struct device *dev = pci->dev;
    u32 perst;

    perst = gpiod_get_value(pcie_ep->reset);
    if (perst) {
        dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
        qcom_pcie_perst_assert(pci);
    } else {
        dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
        qcom_pcie_perst_deassert(pci);
    }

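    /*
     * Flip the trigger level so that the next PERST# transition (to the
     * opposite state) raises this interrupt again.
     */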
    irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
             (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));

    return IRQ_HANDLED;
}

static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
                         struct qcom_pcie_ep *pcie_ep)
{
    int irq, ret;

    irq = platform_get_irq_byname(pdev, "global");
    if (irq < 0)
        return irq;

    ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                    qcom_pcie_ep_global_irq_thread,
                    IRQF_ONESHOT,
                    "global_irq", pcie_ep);
    if (ret) {
        dev_err(&pdev->dev, "Failed to request Global IRQ\n");
        return ret;
    }

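    /*
     * IRQ_NOAUTOEN keeps the PERST# IRQ disabled after request; it is
     * enabled from qcom_pcie_dw_start_link() when the link is started.
     */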
    pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
    irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
    ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
                    qcom_pcie_ep_perst_irq_thread,
                    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                    "perst_irq", pcie_ep);
    if (ret) {
        dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
        disable_irq(irq);
        return ret;
    }

    return 0;
}

static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
                  enum pci_epc_irq_type type, u16 interrupt_num)
{
    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

    switch (type) {
    case PCI_EPC_IRQ_LEGACY:
        return dw_pcie_ep_raise_legacy_irq(ep, func_no);
    case PCI_EPC_IRQ_MSI:
        return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
    default:
        dev_err(pci->dev, "Unknown IRQ type\n");
        return -EINVAL;
    }
}

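/* Endpoint controller features advertised to endpoint function drivers */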
static const struct pci_epc_features qcom_pcie_epc_features = {
    .linkup_notifier = true,
    .core_init_notifier = true,
    .msi_capable = true,
    .msix_capable = false,
};

static const struct pci_epc_features *
qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
{
    return &qcom_pcie_epc_features;
}

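/* Clear all BARs when the endpoint core is initialized */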
static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
{
    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
    enum pci_barno bar;

    for (bar = BAR_0; bar <= BAR_5; bar++)
        dw_pcie_ep_reset_bar(pci, bar);
}

static const struct dw_pcie_ep_ops pci_ep_ops = {
    .ep_init = qcom_pcie_ep_init,
    .raise_irq = qcom_pcie_ep_raise_irq,
    .get_features = qcom_pcie_epc_get_features,
};

static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct qcom_pcie_ep *pcie_ep;
    int ret;

    pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
    if (!pcie_ep)
        return -ENOMEM;

    pcie_ep->pci.dev = dev;
    pcie_ep->pci.ops = &pci_ops;
    pcie_ep->pci.ep.ops = &pci_ep_ops;
    platform_set_drvdata(pdev, pcie_ep);

    ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
    if (ret)
        return ret;

    ret = qcom_pcie_enable_resources(pcie_ep);
    if (ret) {
        dev_err(dev, "Failed to enable resources: %d\n", ret);
        return ret;
    }

    ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
    if (ret) {
        dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
        goto err_disable_resources;
    }

    ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
    if (ret)
        goto err_disable_resources;

    return 0;

err_disable_resources:
    qcom_pcie_disable_resources(pcie_ep);

    return ret;
}

static int qcom_pcie_ep_remove(struct platform_device *pdev)
{
    struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);

    if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
        return 0;

    qcom_pcie_disable_resources(pcie_ep);

    return 0;
}

static const struct of_device_id qcom_pcie_ep_match[] = {
    { .compatible = "qcom,sdx55-pcie-ep", },
    { }
};

static struct platform_driver qcom_pcie_ep_driver = {
    .probe  = qcom_pcie_ep_probe,
    .remove = qcom_pcie_ep_remove,
    .driver = {
        .name = "qcom-pcie-ep",
        .of_match_table = qcom_pcie_ep_match,
    },
};
builtin_platform_driver(qcom_pcie_ep_driver);

MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
MODULE_LICENSE("GPL v2");