0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/clk.h>
0012 #include <linux/crc8.h>
0013 #include <linux/delay.h>
0014 #include <linux/gpio/consumer.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/io.h>
0017 #include <linux/iopoll.h>
0018 #include <linux/kernel.h>
0019 #include <linux/init.h>
0020 #include <linux/of_device.h>
0021 #include <linux/of_gpio.h>
0022 #include <linux/pci.h>
0023 #include <linux/pm_runtime.h>
0024 #include <linux/platform_device.h>
0025 #include <linux/phy/phy.h>
0026 #include <linux/regulator/consumer.h>
0027 #include <linux/reset.h>
0028 #include <linux/slab.h>
0029 #include <linux/types.h>
0030
0031 #include "../../pci.h"
0032 #include "pcie-designware.h"
0033
0034 #define PCIE20_PARF_SYS_CTRL 0x00
0035 #define MST_WAKEUP_EN BIT(13)
0036 #define SLV_WAKEUP_EN BIT(12)
0037 #define MSTR_ACLK_CGC_DIS BIT(10)
0038 #define SLV_ACLK_CGC_DIS BIT(9)
0039 #define CORE_CLK_CGC_DIS BIT(6)
0040 #define AUX_PWR_DET BIT(4)
0041 #define L23_CLK_RMV_DIS BIT(2)
0042 #define L1_CLK_RMV_DIS BIT(1)
0043
0044 #define PCIE20_PARF_PM_CTRL 0x20
0045 #define REQ_NOT_ENTR_L1 BIT(5)
0046
0047 #define PCIE20_PARF_PHY_CTRL 0x40
0048 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
0049 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16)
0050
0051 #define PCIE20_PARF_PHY_REFCLK 0x4C
0052 #define PHY_REFCLK_SSP_EN BIT(16)
0053 #define PHY_REFCLK_USE_PAD BIT(12)
0054
0055 #define PCIE20_PARF_DBI_BASE_ADDR 0x168
0056 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
0057 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
0058 #define AHB_CLK_EN BIT(0)
0059 #define MSTR_AXI_CLK_EN BIT(1)
0060 #define BYPASS BIT(4)
0061
0062 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
0063 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
0064 #define PCIE20_PARF_LTSSM 0x1B0
0065 #define PCIE20_PARF_SID_OFFSET 0x234
0066 #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
0067 #define PCIE20_PARF_DEVICE_TYPE 0x1000
0068 #define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000
0069
0070 #define PCIE20_ELBI_SYS_CTRL 0x04
0071 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
0072
0073 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
0074 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
0075 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
0076 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
0077 #define CFG_BRIDGE_SB_INIT BIT(0)
0078
0079 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \
0080 250)
0081 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \
0082 1)
0083 #define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
0084 PCI_EXP_SLTCAP_PCP | \
0085 PCI_EXP_SLTCAP_MRLSP | \
0086 PCI_EXP_SLTCAP_AIP | \
0087 PCI_EXP_SLTCAP_PIP | \
0088 PCI_EXP_SLTCAP_HPS | \
0089 PCI_EXP_SLTCAP_HPC | \
0090 PCI_EXP_SLTCAP_EIP | \
0091 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
0092 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
0093
0094 #define PCIE20_PARF_Q2A_FLUSH 0x1AC
0095
0096 #define PCIE20_MISC_CONTROL_1_REG 0x8BC
0097 #define DBI_RO_WR_EN 1
0098
0099 #define PERST_DELAY_US 1000
0100
0101 #define PCIE20_PARF_PCS_DEEMPH 0x34
0102 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16)
0103 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8)
0104 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0)
0105
0106 #define PCIE20_PARF_PCS_SWING 0x38
0107 #define PCS_SWING_TX_SWING_FULL(x) ((x) << 8)
0108 #define PCS_SWING_TX_SWING_LOW(x) ((x) << 0)
0109
0110 #define PCIE20_PARF_CONFIG_BITS 0x50
0111 #define PHY_RX0_EQ(x) ((x) << 24)
0112
0113 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
0114 #define SLV_ADDR_SPACE_SZ 0x10000000
0115
0116 #define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0
0117
0118 #define DEVICE_TYPE_RC 0x4
0119
0120 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
0121 #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
0122
0123 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
0124
/* Resources for the 2.1.0 IP revision (apq8064/ipq8064 family). */
struct qcom_pcie_resources_2_1_0 {
	/* iface/core/phy are mandatory, aux/ref optional (see get_resources) */
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;	/* optional external reset */
	/* vdda, vdda_phy, vdda_refclk */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
0135
/* Resources for the 1.0.0 IP revision: four named clocks, one core reset. */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
0144
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
/* Resources for the 2.3.2 IP revision: four clocks plus vdda/vddpe-3v3. */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
0153
#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
/*
 * Resources for the 2.4.0 IP revision. ipq4019 uses only the first three
 * clocks and additionally the pipe/vmid/xpu/parf/phy/phy_ahb resets
 * (see qcom_pcie_get_resources_2_4_0()).
 */
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;	/* 3 on ipq4019, 4 otherwise */
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};
0171
/* Resources for the 2.3.3 IP revision. */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	/* must stay in sync with rst_names[] in get_resources_2_3_3() */
	struct reset_control *rst[7];
};
0180
0181
/* Resources for the 2.7.0 IP revision; num_clks is filled at probe time. */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[9];
	int num_clks;
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
};
0188
/* Resources for the 2.9.0 IP revision: five bulk clocks, one reset. */
struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[5];
	struct reset_control *rst;
};
0193
/* Only one revision's resource set is in use per controller instance. */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};
0203
0204 struct qcom_pcie;
0205
/* Per-IP-revision hooks; optional hooks may be NULL (callers check). */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);	/* acquire clks/resets/supplies */
	int (*init)(struct qcom_pcie *pcie);		/* power up, release resets */
	int (*post_init)(struct qcom_pcie *pcie);	/* program PARF/DBI registers */
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);	/* start link training */
	int (*config_sid)(struct qcom_pcie *pcie);
};
0215
/* Per-compatible configuration: ops table plus optional-clock flags. */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	unsigned int has_tbu_clk:1;
	unsigned int has_ddrss_sf_tbu_clk:1;
	unsigned int has_aggre0_clk:1;
	unsigned int has_aggre1_clk:1;
};
0223
/* Driver state for one controller instance. */
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;	/* PARF (PCIe wrapper) register space */
	void __iomem *elbi;	/* ELBI register space */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;	/* PERST# GPIO towards the endpoint */
	const struct qcom_pcie_cfg *cfg;
};
0233
0234 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
0235
/* Assert PERST# towards the endpoint and give it time to take effect. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
0241
/* Release PERST# and wait for the endpoint to come out of reset. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/*
	 * Delay before deasserting — presumably to guarantee a minimum
	 * PERST# assertion time towards the endpoint; TODO(review): confirm
	 * the 100 ms figure against the PCIe CEM spec / board requirements.
	 */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
0249
0250 static int qcom_pcie_start_link(struct dw_pcie *pci)
0251 {
0252 struct qcom_pcie *pcie = to_qcom_pcie(pci);
0253
0254
0255 if (pcie->cfg->ops->ltssm_enable)
0256 pcie->cfg->ops->ltssm_enable(pcie);
0257
0258 return 0;
0259 }
0260
0261 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
0262 {
0263 u32 val;
0264
0265
0266 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
0267 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
0268 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
0269 }
0270
/*
 * Acquire supplies, clocks and resets for the 2.1.0 IP revision.
 * Returns 0 on success or a negative errno from the devm_* helpers.
 */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core and phy are mandatory */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux and ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	/* the external reset is optional */
	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
0325
/* Power down the 2.1.0 controller: clocks off, resets asserted, PHY off. */
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	/*
	 * Set bit 0 of PARF_PHY_CTRL (post-init clears it) — presumably puts
	 * the PHY back in its powered-down state; TODO(review): confirm
	 * against the PARF register documentation.
	 */
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
0342
/*
 * Power up the 2.1.0 controller: assert every reset, enable the supplies,
 * then release the resets in the required order (ahb -> ext -> phy -> pci
 * -> por -> axi), unwinding in reverse on any failure.
 */
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Hold everything in reset while the supplies come up. */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	return 0;

	/* Unwind: re-assert everything released so far, then drop supplies. */
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
0417
/*
 * Program the PARF/PHY and DBI registers for the 2.1.0 controller after
 * clocks and resets are up, applying board-specific PHY tuning for the
 * ipq8064/apq8064 variants.
 */
static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/*
	 * Clear bit 0 of PARF_PHY_CTRL (deinit sets it) — presumably takes
	 * the PHY out of its powered-down state; TODO(review): confirm.
	 */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	/* PCS TX de-emphasis, swing and RX equalization for ipq8064 boards */
	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* TX termination offset of 7, ipq8064 only */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/*
	 * Reference clock: the pad input (USE_PAD) is kept only on apq8064;
	 * SSP is enabled unconditionally.
	 */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);

	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* settle delay after PHY/refclk programming — TODO(review): confirm */
	usleep_range(1000, 1500);

	/* remote read request bridge size 2K, slave AXI bridge init */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;
}
0475
0476 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
0477 {
0478 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
0479 struct dw_pcie *pci = pcie->pci;
0480 struct device *dev = pci->dev;
0481
0482 res->vdda = devm_regulator_get(dev, "vdda");
0483 if (IS_ERR(res->vdda))
0484 return PTR_ERR(res->vdda);
0485
0486 res->iface = devm_clk_get(dev, "iface");
0487 if (IS_ERR(res->iface))
0488 return PTR_ERR(res->iface);
0489
0490 res->aux = devm_clk_get(dev, "aux");
0491 if (IS_ERR(res->aux))
0492 return PTR_ERR(res->aux);
0493
0494 res->master_bus = devm_clk_get(dev, "master_bus");
0495 if (IS_ERR(res->master_bus))
0496 return PTR_ERR(res->master_bus);
0497
0498 res->slave_bus = devm_clk_get(dev, "slave_bus");
0499 if (IS_ERR(res->slave_bus))
0500 return PTR_ERR(res->slave_bus);
0501
0502 res->core = devm_reset_control_get_exclusive(dev, "core");
0503 return PTR_ERR_OR_ZERO(res->core);
0504 }
0505
/*
 * Undo qcom_pcie_init_1_0_0(): assert the core reset, disable the clocks
 * in reverse of the enable order, then drop the vdda supply.
 */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
0517
/*
 * Power up the 1.0.0 controller: release the core reset, enable the four
 * clocks (aux -> iface -> master_bus -> slave_bus) and the vdda supply,
 * unwinding everything in reverse on failure.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	return 0;
	/* Unwind in reverse of the enable order. */
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
0575
0576 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
0577 {
0578
0579 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
0580
0581 if (IS_ENABLED(CONFIG_PCI_MSI)) {
0582 u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
0583
0584 val |= BIT(31);
0585 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
0586 }
0587
0588 return 0;
0589 }
0590
0591 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
0592 {
0593 u32 val;
0594
0595
0596 val = readl(pcie->parf + PCIE20_PARF_LTSSM);
0597 val |= BIT(8);
0598 writel(val, pcie->parf + PCIE20_PARF_LTSSM);
0599 }
0600
0601 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
0602 {
0603 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
0604 struct dw_pcie *pci = pcie->pci;
0605 struct device *dev = pci->dev;
0606 int ret;
0607
0608 res->supplies[0].supply = "vdda";
0609 res->supplies[1].supply = "vddpe-3v3";
0610 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
0611 res->supplies);
0612 if (ret)
0613 return ret;
0614
0615 res->aux_clk = devm_clk_get(dev, "aux");
0616 if (IS_ERR(res->aux_clk))
0617 return PTR_ERR(res->aux_clk);
0618
0619 res->cfg_clk = devm_clk_get(dev, "cfg");
0620 if (IS_ERR(res->cfg_clk))
0621 return PTR_ERR(res->cfg_clk);
0622
0623 res->master_clk = devm_clk_get(dev, "bus_master");
0624 if (IS_ERR(res->master_clk))
0625 return PTR_ERR(res->master_clk);
0626
0627 res->slave_clk = devm_clk_get(dev, "bus_slave");
0628 if (IS_ERR(res->slave_clk))
0629 return PTR_ERR(res->slave_clk);
0630
0631 return 0;
0632 }
0633
/*
 * Undo qcom_pcie_init_2_3_2(): disable the clocks in reverse of the
 * enable order, then drop the supplies.
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
0645
/*
 * Power up the 2.3.2 controller: enable the supplies, then the clocks
 * (aux -> cfg -> master -> slave), unwinding in reverse on failure.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	return 0;

	/* Unwind in reverse of the enable order. */
err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
0697
/* Post-init PARF register fix-up for the 2.3.2 controller. */
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* Clear bit 0 of PARF_PHY_CTRL — presumably un-powers-down the PHY;
	 * TODO(review): confirm against the PARF documentation. */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* Bit 29 of SYS_CTRL is not named by the local defines —
	 * TODO(review): confirm its meaning. */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* BIT(4) is BYPASS in this register (see the defines above) */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
0725
/*
 * Acquire clocks and resets for the 2.4.0 IP revision. ipq4019 uses only
 * the first three clocks (no "iface") but needs several extra resets.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/* These additional resets exist only on ipq4019. */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
0808
/*
 * Undo qcom_pcie_init_2_4_0(): assert all resets, then stop the clocks.
 * NOTE(review): on non-ipq4019 variants pipe/phy/phy_ahb resets are never
 * acquired — this relies on them being NULL (zero-initialized res) so the
 * reset_control_assert() calls are no-ops; confirm res is zeroed at alloc.
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
0824
/*
 * Power up the 2.4.0 controller. The sequence is strictly ordered: assert
 * every reset (with settle delays), deassert them in the required order,
 * then enable the bulk clocks; the error path re-asserts in reverse.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Phase 1: assert all resets, pausing between groups. */
	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	/* Phase 2: deassert in the required order (phy side first). */
	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	/* Phase 3: clocks last, once everything is out of reset. */
	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	return 0;

	/* Unwind: re-assert in reverse of the deassert order. */
err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}
0976
/*
 * Post-init PARF register fix-up for the 2.4.0 controller — the register
 * sequence is identical to qcom_pcie_post_init_2_3_2() in this file.
 */
static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie)
{
	u32 val;

	/* Clear bit 0 of PARF_PHY_CTRL — presumably un-powers-down the PHY;
	 * TODO(review): confirm against the PARF documentation. */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* Bit 29 of SYS_CTRL is not named by the local defines —
	 * TODO(review): confirm its meaning. */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* BIT(4) is BYPASS in this register (see the defines above) */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
}
1004
1005 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
1006 {
1007 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
1008 struct dw_pcie *pci = pcie->pci;
1009 struct device *dev = pci->dev;
1010 int i;
1011 const char *rst_names[] = { "axi_m", "axi_s", "pipe",
1012 "axi_m_sticky", "sticky",
1013 "ahb", "sleep", };
1014
1015 res->iface = devm_clk_get(dev, "iface");
1016 if (IS_ERR(res->iface))
1017 return PTR_ERR(res->iface);
1018
1019 res->axi_m_clk = devm_clk_get(dev, "axi_m");
1020 if (IS_ERR(res->axi_m_clk))
1021 return PTR_ERR(res->axi_m_clk);
1022
1023 res->axi_s_clk = devm_clk_get(dev, "axi_s");
1024 if (IS_ERR(res->axi_s_clk))
1025 return PTR_ERR(res->axi_s_clk);
1026
1027 res->ahb_clk = devm_clk_get(dev, "ahb");
1028 if (IS_ERR(res->ahb_clk))
1029 return PTR_ERR(res->ahb_clk);
1030
1031 res->aux_clk = devm_clk_get(dev, "aux");
1032 if (IS_ERR(res->aux_clk))
1033 return PTR_ERR(res->aux_clk);
1034
1035 for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
1036 res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
1037 if (IS_ERR(res->rst[i]))
1038 return PTR_ERR(res->rst[i]);
1039 }
1040
1041 return 0;
1042 }
1043
/*
 * Undo qcom_pcie_init_2_3_3(): disable the five clocks.
 * NOTE(review): clocks are disabled in the same order they were enabled
 * (not reversed), and the resets are left deasserted — confirm intended.
 */
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
1054
1055 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
1056 {
1057 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
1058 struct dw_pcie *pci = pcie->pci;
1059 struct device *dev = pci->dev;
1060 int i, ret;
1061
1062 for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1063 ret = reset_control_assert(res->rst[i]);
1064 if (ret) {
1065 dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
1066 return ret;
1067 }
1068 }
1069
1070 usleep_range(2000, 2500);
1071
1072 for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1073 ret = reset_control_deassert(res->rst[i]);
1074 if (ret) {
1075 dev_err(dev, "reset #%d deassert failed (%d)\n", i,
1076 ret);
1077 return ret;
1078 }
1079 }
1080
1081
1082
1083
1084
1085 usleep_range(2000, 2500);
1086
1087 ret = clk_prepare_enable(res->iface);
1088 if (ret) {
1089 dev_err(dev, "cannot prepare/enable core clock\n");
1090 goto err_clk_iface;
1091 }
1092
1093 ret = clk_prepare_enable(res->axi_m_clk);
1094 if (ret) {
1095 dev_err(dev, "cannot prepare/enable core clock\n");
1096 goto err_clk_axi_m;
1097 }
1098
1099 ret = clk_prepare_enable(res->axi_s_clk);
1100 if (ret) {
1101 dev_err(dev, "cannot prepare/enable axi slave clock\n");
1102 goto err_clk_axi_s;
1103 }
1104
1105 ret = clk_prepare_enable(res->ahb_clk);
1106 if (ret) {
1107 dev_err(dev, "cannot prepare/enable ahb clock\n");
1108 goto err_clk_ahb;
1109 }
1110
1111 ret = clk_prepare_enable(res->aux_clk);
1112 if (ret) {
1113 dev_err(dev, "cannot prepare/enable aux clock\n");
1114 goto err_clk_aux;
1115 }
1116
1117 return 0;
1118
1119 err_clk_aux:
1120 clk_disable_unprepare(res->ahb_clk);
1121 err_clk_ahb:
1122 clk_disable_unprepare(res->axi_s_clk);
1123 err_clk_axi_s:
1124 clk_disable_unprepare(res->axi_m_clk);
1125 err_clk_axi_m:
1126 clk_disable_unprepare(res->iface);
1127 err_clk_iface:
1128
1129
1130
1131
1132 for (i = 0; i < ARRAY_SIZE(res->rst); i++)
1133 reset_control_assert(res->rst[i]);
1134
1135 return ret;
1136 }
1137
/*
 * Program the v2.3.3 PARF wrapper and DBI registers after the PHY is
 * powered on. The write order follows the hardware bring-up sequence;
 * do not reorder.
 */
static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	/* Set the AXI slave address space size */
	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	/* Clear bit 0 of PHY_CTRL to release the PHY from PARF control */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* Route DBI accesses at base offset 0 */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* Wakeup enables, clock-gating disables and aux power detect */
	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);
	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	/* Enable bus mastering for the root port */
	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
	/* Open read-only DBI registers for writing, then set slot capability */
	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	/* Advertise no ASPM support in the link capabilities */
	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	/* Disable the completion timeout */
	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	return 0;
}
1172
1173 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
1174 {
1175 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1176 struct dw_pcie *pci = pcie->pci;
1177 struct device *dev = pci->dev;
1178 unsigned int idx;
1179 int ret;
1180
1181 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
1182 if (IS_ERR(res->pci_reset))
1183 return PTR_ERR(res->pci_reset);
1184
1185 res->supplies[0].supply = "vdda";
1186 res->supplies[1].supply = "vddpe-3v3";
1187 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
1188 res->supplies);
1189 if (ret)
1190 return ret;
1191
1192 idx = 0;
1193 res->clks[idx++].id = "aux";
1194 res->clks[idx++].id = "cfg";
1195 res->clks[idx++].id = "bus_master";
1196 res->clks[idx++].id = "bus_slave";
1197 res->clks[idx++].id = "slave_q2a";
1198 if (pcie->cfg->has_tbu_clk)
1199 res->clks[idx++].id = "tbu";
1200 if (pcie->cfg->has_ddrss_sf_tbu_clk)
1201 res->clks[idx++].id = "ddrss_sf_tbu";
1202 if (pcie->cfg->has_aggre0_clk)
1203 res->clks[idx++].id = "aggre0";
1204 if (pcie->cfg->has_aggre1_clk)
1205 res->clks[idx++].id = "aggre1";
1206
1207 res->num_clks = idx;
1208
1209 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
1210 if (ret < 0)
1211 return ret;
1212
1213 return 0;
1214 }
1215
1216 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
1217 {
1218 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1219 struct dw_pcie *pci = pcie->pci;
1220 struct device *dev = pci->dev;
1221 u32 val;
1222 int ret;
1223
1224 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
1225 if (ret < 0) {
1226 dev_err(dev, "cannot enable regulators\n");
1227 return ret;
1228 }
1229
1230 ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
1231 if (ret < 0)
1232 goto err_disable_regulators;
1233
1234 ret = reset_control_assert(res->pci_reset);
1235 if (ret < 0) {
1236 dev_err(dev, "cannot deassert pci reset\n");
1237 goto err_disable_clocks;
1238 }
1239
1240 usleep_range(1000, 1500);
1241
1242 ret = reset_control_deassert(res->pci_reset);
1243 if (ret < 0) {
1244 dev_err(dev, "cannot deassert pci reset\n");
1245 goto err_disable_clocks;
1246 }
1247
1248
1249 usleep_range(1000, 1500);
1250
1251
1252 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
1253
1254
1255 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1256 val &= ~BIT(0);
1257 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1258
1259
1260 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1261
1262
1263 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
1264 val &= ~BIT(29);
1265 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
1266
1267 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1268 val |= BIT(4);
1269 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1270
1271
1272 val = readl(pcie->parf + PCIE20_PARF_PM_CTRL);
1273 val &= ~REQ_NOT_ENTR_L1;
1274 writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
1275
1276 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1277 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1278 val |= BIT(31);
1279 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1280 }
1281
1282 return 0;
1283 err_disable_clocks:
1284 clk_bulk_disable_unprepare(res->num_clks, res->clks);
1285 err_disable_regulators:
1286 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1287
1288 return ret;
1289 }
1290
1291 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
1292 {
1293 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1294
1295 clk_bulk_disable_unprepare(res->num_clks, res->clks);
1296
1297 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1298 }
1299
1300 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1301 {
1302 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1303 struct dw_pcie *pci = pcie->pci;
1304 struct device *dev = pci->dev;
1305 int ret;
1306
1307 res->clks[0].id = "iface";
1308 res->clks[1].id = "axi_m";
1309 res->clks[2].id = "axi_s";
1310 res->clks[3].id = "axi_bridge";
1311 res->clks[4].id = "rchng";
1312
1313 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
1314 if (ret < 0)
1315 return ret;
1316
1317 res->rst = devm_reset_control_array_get_exclusive(dev);
1318 if (IS_ERR(res->rst))
1319 return PTR_ERR(res->rst);
1320
1321 return 0;
1322 }
1323
1324 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
1325 {
1326 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1327
1328 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
1329 }
1330
1331 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
1332 {
1333 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1334 struct device *dev = pcie->pci->dev;
1335 int ret;
1336
1337 ret = reset_control_assert(res->rst);
1338 if (ret) {
1339 dev_err(dev, "reset assert failed (%d)\n", ret);
1340 return ret;
1341 }
1342
1343
1344
1345
1346
1347 usleep_range(2000, 2500);
1348
1349 ret = reset_control_deassert(res->rst);
1350 if (ret) {
1351 dev_err(dev, "reset deassert failed (%d)\n", ret);
1352 return ret;
1353 }
1354
1355 usleep_range(2000, 2500);
1356
1357 return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
1358 }
1359
/*
 * Program the v2.9.0 PARF wrapper and DBI registers after the PHY is
 * powered on, and clear the BDF-to-SID translation table. The write
 * order follows the hardware bring-up sequence; do not reorder.
 */
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	/* Set the AXI slave address space size */
	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);

	/* Clear bit 0 of PHY_CTRL to release the PHY from PARF control */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* Route DBI accesses at base offset 0 */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* Configure the controller as a Root Complex */
	writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	/* Gen3 equalization/ZRXDC workarounds in the DBI space */
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	/* Wakeup enables, clock-gating disables and aux power detect */
	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PCIE20_PARF_SYS_CTRL);

	writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);

	/* Open read-only DBI registers for writing */
	dw_pcie_dbi_ro_wr_en(pci);
	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	/* Advertise no ASPM support in the link capabilities */
	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	/* Disable the completion timeout */
	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	/* Zero all 256 BDF-to-SID table entries */
	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}
1405
1406 static int qcom_pcie_link_up(struct dw_pcie *pci)
1407 {
1408 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1409 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1410
1411 return !!(val & PCI_EXP_LNKSTA_DLLLA);
1412 }
1413
/*
 * Populate the PARF BDF-to-SID hash table from the "iommu-map" DT
 * property so the controller emits the right SMMU stream ID per
 * bus/device/function. Collisions chain through the NEXT byte of
 * each 32-bit entry.
 */
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu-map entry layout: (bdf, phandle, smmu_sid, smmu_sid_len) */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	/* NOTE(review): return value unchecked — map stays zeroed on failure */
	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* The hardware table must be zeroed before it is populated */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first iommu-map entry */
	smmu_sid_base = map[0].smmu_sid;

	/* Insert each mapping at the CRC8 hash of its big-endian BDF */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* Slot occupied: probe forward, linking via the NEXT byte */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If the NEXT field is empty, chain it to the new slot */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* Entry layout: BDF [31:16] | SID offset [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
1484
/*
 * DesignWare host-init hook: bring up the per-IP controller resources
 * and the PHY, run optional post-init programming, then release the
 * endpoint from reset. Errors unwind in strict reverse order.
 */
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Keep the endpoint in reset while the controller/PHY come up */
	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	/* Optional per-IP register programming after the PHY is on */
	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	/* Optional BDF-to-SID (SMMU stream ID) table programming */
	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	qcom_ep_reset_assert(pcie);
	if (pcie->cfg->ops->post_deinit)
		pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}
1528
/* Host ops handed to the DesignWare core */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};
1532
1533
/* Ops for Qcom PCIe controller IP v2.1.0 */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v1.0.0 */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v2.3.2 */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v2.4.0 */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v2.3.3 */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v2.7.0 */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Ops for Qcom PCIe controller IP v1.9.0 (v2.7.0 plus SID config) */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_sm8250,
};

/* Ops for Qcom PCIe controller IP v2.9.0 */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1603
/* Per-SoC configurations: which ops variant and optional clocks apply */
static const struct qcom_pcie_cfg apq8084_cfg = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg ipq8064_cfg = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg msm8996_cfg = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg ipq8074_cfg = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg ipq4019_cfg = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg sdm845_cfg = {
	.ops = &ops_2_7_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8150_cfg = {
	/*
	 * NOTE(review): sm8150 reuses the 1.9.0 ops — presumably its IP
	 * revision is programming-compatible; confirm against HW docs.
	 */
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg sm8250_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
	.has_ddrss_sf_tbu_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.has_aggre0_clk = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
	.ops = &ops_1_9_0,
	.has_ddrss_sf_tbu_clk = true,
	.has_aggre1_clk = true,
};

static const struct qcom_pcie_cfg sc7280_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg sc8180x_cfg = {
	.ops = &ops_1_9_0,
	.has_tbu_clk = true,
};

static const struct qcom_pcie_cfg ipq6018_cfg = {
	.ops = &ops_2_9_0,
};
1668
/* Core DesignWare callbacks: link-state query and link start */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
1673
/*
 * Probe: match the per-SoC config, map the PARF and ELBI regions,
 * grab the optional PERST# GPIO and PHY, then hand control to the
 * DesignWare host core. pm_runtime is held for the device's lifetime.
 */
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct qcom_pcie *pcie;
	const struct qcom_pcie_cfg *pcie_cfg;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		/* get_sync bumps the usage count even on failure: put is right */
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	/* PERST# is optional; asserted (OUT_HIGH) until host init releases it */
	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	/* The PHY is optional; a missing "pciephy" yields a NULL no-op phy */
	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
1762
/* DT compatible strings mapped to the per-SoC configurations above */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
	{ .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
	{ .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
	{ .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
	{ .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
	{ .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
	{ .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
	{ .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
	{ }
};
1782
/*
 * Force the PCI-to-PCI bridge class on Qcom root ports whose config
 * space reports a different class code.
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
/* Device IDs of the affected Qcom root ports */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1794
/* Built-in platform driver; no remove, so unbinding is suppressed */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);