0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * PCIe host controller driver for Texas Instruments Keystone SoCs
0004  *
0005  * Copyright (C) 2013-2014 Texas Instruments., Ltd.
0006  *      https://www.ti.com
0007  *
0008  * Author: Murali Karicheri <m-karicheri2@ti.com>
0009  * Implementation based on pci-exynos.c and pcie-designware.c
0010  */
0011 
0012 #include <linux/clk.h>
0013 #include <linux/delay.h>
0014 #include <linux/gpio/consumer.h>
0015 #include <linux/init.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/irqchip/chained_irq.h>
0018 #include <linux/irqdomain.h>
0019 #include <linux/mfd/syscon.h>
0020 #include <linux/msi.h>
0021 #include <linux/of.h>
0022 #include <linux/of_device.h>
0023 #include <linux/of_irq.h>
0024 #include <linux/of_pci.h>
0025 #include <linux/phy/phy.h>
0026 #include <linux/platform_device.h>
0027 #include <linux/regmap.h>
0028 #include <linux/resource.h>
0029 #include <linux/signal.h>
0030 
0031 #include "../../pci.h"
0032 #include "pcie-designware.h"
0033 
/* Layout of the syscon PCIe-ID register: vendor ID in the low half-word,
 * device ID in the high half-word (see ks_pcie_init_id()).
 */
#define PCIE_VENDORID_MASK  0xffff
#define PCIE_DEVICEID_SHIFT 16

/* Application registers */
#define CMD_STATUS          0x004
#define LTSSM_EN_VAL                BIT(0)
#define OB_XLAT_EN_VAL              BIT(1)
#define DBI_CS2             BIT(5)

/* Target bus/device/function for "other" (non-root) config accesses */
#define CFG_SETUP           0x008
#define CFG_BUS(x)          (((x) & 0xff) << 16)
#define CFG_DEVICE(x)           (((x) & 0x1f) << 8)
#define CFG_FUNC(x)         ((x) & 0x7)
#define CFG_TYPE1           BIT(24)

/* Outbound address translation windows */
#define OB_SIZE             0x030
#define OB_OFFSET_INDEX(n)      (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)         (0x204 + (8 * (n)))
#define OB_ENABLEN          BIT(0)
#define OB_WIN_SIZE         8   /* 8MB */

/* EP-mode registers used to pulse a legacy (INTx) interrupt towards the RC */
#define PCIE_LEGACY_IRQ_ENABLE_SET(n)   (0x188 + (0x10 * ((n) - 1)))
#define PCIE_LEGACY_IRQ_ENABLE_CLR(n)   (0x18c + (0x10 * ((n) - 1)))
#define PCIE_EP_IRQ_SET         0x64
#define PCIE_EP_IRQ_CLR         0x68
#define INT_ENABLE          BIT(0)

/* IRQ register defines */
#define IRQ_EOI             0x050

/* MSI capture register (bus address written by endpoints) and the eight
 * per-group status/enable register banks (one bank per (vector % 8) group).
 */
#define MSI_IRQ             0x054
#define MSI_IRQ_STATUS(n)       (0x104 + ((n) << 4))
#define MSI_IRQ_ENABLE_SET(n)       (0x108 + ((n) << 4))
#define MSI_IRQ_ENABLE_CLR(n)       (0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET          4

/* Per-INTx (A..D) status/enable registers */
#define IRQ_STATUS(n)           (0x184 + ((n) << 4))
#define IRQ_ENABLE_SET(n)       (0x188 + ((n) << 4))
#define INTx_EN             BIT(0)

/* Error interrupt status/enable bits; note AM6 reports ECRC errors on the
 * bit Keystone 2 uses for AXI tag lookup errors (see ks_pcie_handle_error_irq).
 */
#define ERR_IRQ_STATUS          0x1c4
#define ERR_IRQ_ENABLE_SET      0x1c8
#define ERR_AER             BIT(5)  /* ECRC error */
#define AM6_ERR_AER         BIT(4)  /* AM6 ECRC error */
#define ERR_AXI             BIT(4)  /* AXI tag lookup fatal error */
#define ERR_CORR            BIT(3)  /* Correctable error */
#define ERR_NONFATAL            BIT(2)  /* Non-fatal error */
#define ERR_FATAL           BIT(1)  /* Fatal error */
#define ERR_SYS             BIT(0)  /* System error */
#define ERR_IRQ_ALL         (ERR_AER | ERR_AXI | ERR_CORR | \
                     ERR_NONFATAL | ERR_FATAL | ERR_SYS)

/* PCIE controller device IDs */
#define PCIE_RC_K2HK            0xb008
#define PCIE_RC_K2E         0xb009
#define PCIE_RC_K2L         0xb00a
#define PCIE_RC_K2G         0xb00b

/* Device-mode field in the syscon "pcie-mode" register (see ks_pcie_set_mode) */
#define KS_PCIE_DEV_TYPE_MASK       (0x3 << 1)
#define KS_PCIE_DEV_TYPE(mode)      ((mode) << 1)

/* Values for KS_PCIE_DEV_TYPE(): endpoint, legacy endpoint, root complex */
#define EP              0x0
#define LEG_EP              0x1
#define RC              0x2

#define KS_PCIE_SYSCLOCKOUTEN       BIT(0)

#define AM654_PCIE_DEV_TYPE_MASK    0x3
#define AM654_WIN_SIZE          SZ_64K

#define APP_ADDR_SPACE_0        (16 * SZ_1K)

/* dw_pcie stores the keystone_pcie as drvdata of its device */
#define to_keystone_pcie(x)     dev_get_drvdata((x)->dev)
0107 
/* Per-compatible match data selecting host or endpoint mode and the
 * corresponding DesignWare callback tables.
 */
struct ks_pcie_of_data {
    enum dw_pcie_device_mode mode;      /* RC or EP operation */
    const struct dw_pcie_host_ops *host_ops;    /* used when mode is host */
    const struct dw_pcie_ep_ops *ep_ops;        /* used when mode is endpoint */
    u32 version;                /* DWC IP version for this SoC */
};
0114 
/* Driver-private state for one Keystone PCIe controller instance. */
struct keystone_pcie {
    struct dw_pcie      *pci;
    /* PCI Device ID */
    u32         device_id;
    /* Linux IRQ numbers of the four chained INTx parent interrupts */
    int         legacy_host_irqs[PCI_NUM_INTX];
    struct          device_node *legacy_intc_np;

    /* hwirq of the first MSI parent interrupt (see ks_pcie_config_msi_irq) */
    int         msi_host_irq;
    int         num_lanes;
    u32         num_viewport;   /* outbound translation windows */
    struct phy      **phy;      /* one PHY per lane */
    struct device_link  **link;
    struct          device_node *msi_intc_np;
    struct irq_domain   *legacy_irq_domain;
    struct device_node  *np;

    /* Application register space */
    void __iomem        *va_app_base;   /* DT 1st resource */
    struct resource     app;        /* physical app-register region */
    bool            is_am6;     /* AM65x/AM6 variant quirks */
};
0136 
0137 static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
0138 {
0139     return readl(ks_pcie->va_app_base + offset);
0140 }
0141 
0142 static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
0143                    u32 val)
0144 {
0145     writel(val, ks_pcie->va_app_base + offset);
0146 }
0147 
/* Acknowledge one MSI vector: clear its status bit and EOI its group.
 *
 * The hardware spreads MSI vectors across 8 status registers of 4 bits
 * each: vector @irq lives in register (irq % 8), bit (irq / 8)
 * (see the mapping comment in ks_pcie_msi_irq_handler()).
 */
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
    struct dw_pcie_rp *pp  = irq_data_get_irq_chip_data(data);
    struct keystone_pcie *ks_pcie;
    u32 irq = data->hwirq;
    struct dw_pcie *pci;
    u32 reg_offset;
    u32 bit_pos;

    pci = to_dw_pcie_from_pp(pp);
    ks_pcie = to_keystone_pcie(pci);

    reg_offset = irq % 8;
    bit_pos = irq >> 3;

    /* Write-1-to-clear the pending bit, then EOI the MSI group line */
    ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
               BIT(bit_pos));
    ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
0167 
0168 static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
0169 {
0170     struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
0171     struct keystone_pcie *ks_pcie;
0172     struct dw_pcie *pci;
0173     u64 msi_target;
0174 
0175     pci = to_dw_pcie_from_pp(pp);
0176     ks_pcie = to_keystone_pcie(pci);
0177 
0178     msi_target = ks_pcie->app.start + MSI_IRQ;
0179     msg->address_lo = lower_32_bits(msi_target);
0180     msg->address_hi = upper_32_bits(msi_target);
0181     msg->data = data->hwirq;
0182 
0183     dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
0184         (int)data->hwirq, msg->address_hi, msg->address_lo);
0185 }
0186 
/* MSI affinity cannot be steered on this controller; reject all requests. */
static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
                    const struct cpumask *mask, bool force)
{
    return -EINVAL;
}
0192 
/* Mask one MSI vector by clearing its enable bit in the per-group
 * MSI_IRQ_ENABLE_CLR register, serialized against the host-bridge lock.
 */
static void ks_pcie_msi_mask(struct irq_data *data)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
    struct keystone_pcie *ks_pcie;
    u32 irq = data->hwirq;
    struct dw_pcie *pci;
    unsigned long flags;
    u32 reg_offset;
    u32 bit_pos;

    raw_spin_lock_irqsave(&pp->lock, flags);

    pci = to_dw_pcie_from_pp(pp);
    ks_pcie = to_keystone_pcie(pci);

    /* Vector -> (group register, bit) mapping; see ks_pcie_msi_irq_ack() */
    reg_offset = irq % 8;
    bit_pos = irq >> 3;

    ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
               BIT(bit_pos));

    raw_spin_unlock_irqrestore(&pp->lock, flags);
}
0216 
/* Unmask one MSI vector by setting its enable bit in the per-group
 * MSI_IRQ_ENABLE_SET register; mirror image of ks_pcie_msi_mask().
 */
static void ks_pcie_msi_unmask(struct irq_data *data)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
    struct keystone_pcie *ks_pcie;
    u32 irq = data->hwirq;
    struct dw_pcie *pci;
    unsigned long flags;
    u32 reg_offset;
    u32 bit_pos;

    raw_spin_lock_irqsave(&pp->lock, flags);

    pci = to_dw_pcie_from_pp(pp);
    ks_pcie = to_keystone_pcie(pci);

    /* Vector -> (group register, bit) mapping; see ks_pcie_msi_irq_ack() */
    reg_offset = irq % 8;
    bit_pos = irq >> 3;

    ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
               BIT(bit_pos));

    raw_spin_unlock_irqrestore(&pp->lock, flags);
}
0240 
/* irq_chip for MSI vectors, installed via pp->msi_irq_chip in
 * ks_pcie_msi_host_init().
 */
static struct irq_chip ks_pcie_msi_irq_chip = {
    .name = "KEYSTONE-PCI-MSI",
    .irq_ack = ks_pcie_msi_irq_ack,
    .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
    .irq_set_affinity = ks_pcie_msi_set_affinity,
    .irq_mask = ks_pcie_msi_mask,
    .irq_unmask = ks_pcie_msi_unmask,
};
0249 
0250 static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
0251 {
0252     pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
0253     return dw_pcie_allocate_domains(pp);
0254 }
0255 
0256 static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
0257                       int offset)
0258 {
0259     struct dw_pcie *pci = ks_pcie->pci;
0260     struct device *dev = pci->dev;
0261     u32 pending;
0262 
0263     pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
0264 
0265     if (BIT(0) & pending) {
0266         dev_dbg(dev, ": irq: irq_offset %d", offset);
0267         generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
0268     }
0269 
0270     /* EOI the INTx interrupt */
0271     ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
0272 }
0273 
/* Enable every error interrupt source (AER/AXI/corr/nonfatal/fatal/sys). */
static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
    ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
0278 
/* Log and clear pending error interrupts.
 *
 * Returns IRQ_NONE when no error bit is set (shared-IRQ etiquette),
 * IRQ_HANDLED otherwise. Note ERR_AXI and AM6_ERR_AER share the same bit,
 * so the AXI message is suppressed on AM6 where that bit means ECRC.
 */
static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
    u32 reg;
    struct device *dev = ks_pcie->pci->dev;

    reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
    if (!reg)
        return IRQ_NONE;

    if (reg & ERR_SYS)
        dev_err(dev, "System Error\n");

    if (reg & ERR_FATAL)
        dev_err(dev, "Fatal Error\n");

    if (reg & ERR_NONFATAL)
        dev_dbg(dev, "Non Fatal Error\n");

    if (reg & ERR_CORR)
        dev_dbg(dev, "Correctable Error\n");

    if (!ks_pcie->is_am6 && (reg & ERR_AXI))
        dev_err(dev, "AXI tag lookup fatal Error\n");

    if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
        dev_err(dev, "ECRC Error\n");

    /* Write-back clears the handled status bits */
    ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);

    return IRQ_HANDLED;
}
0310 
/* Per-INTx ack/mask/unmask are intentionally empty: the chained handler
 * (ks_pcie_handle_legacy_irq) does the EOI, and the lines are enabled once
 * at init in ks_pcie_config_legacy_irq().
 */
static void ks_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_pcie_legacy_irq_chip = {
    .name = "Keystone-PCI-Legacy-IRQ",
    .irq_ack = ks_pcie_ack_legacy_irq,
    .irq_mask = ks_pcie_mask_legacy_irq,
    .irq_unmask = ks_pcie_unmask_legacy_irq,
};

/* Bind newly mapped INTx hwirqs to the stub chip with level-type flow. */
static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
                       unsigned int irq,
                       irq_hw_number_t hw_irq)
{
    irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
                 handle_level_irq);
    irq_set_chip_data(irq, d->host_data);

    return 0;
}

static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
    .map = ks_pcie_init_legacy_irq_map,
    .xlate = irq_domain_xlate_onetwocell,
};
0345 
0346 /**
0347  * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
0348  * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
0349  *       PCIe host controller driver information.
0350  *
0351  * Since modification of dbi_cs2 involves different clock domain, read the
0352  * status back to ensure the transition is complete.
0353  */
0354 static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
0355 {
0356     u32 val;
0357 
0358     val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0359     val |= DBI_CS2;
0360     ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
0361 
0362     do {
0363         val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0364     } while (!(val & DBI_CS2));
0365 }
0366 
0367 /**
0368  * ks_pcie_clear_dbi_mode() - Disable DBI mode
0369  * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
0370  *       PCIe host controller driver information.
0371  *
0372  * Since modification of dbi_cs2 involves different clock domain, read the
0373  * status back to ensure the transition is complete.
0374  */
0375 static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
0376 {
0377     u32 val;
0378 
0379     val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0380     val &= ~DBI_CS2;
0381     ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
0382 
0383     do {
0384         val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0385     } while (val & DBI_CS2);
0386 }
0387 
/* Program RC application registers: disable the host-side BARs and set up
 * 1:1 outbound translation windows over the bridge's first MEM window.
 * AM6 uses the DWC ATU instead, so it skips the outbound setup.
 */
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
    u32 val;
    u32 num_viewport = ks_pcie->num_viewport;
    struct dw_pcie *pci = ks_pcie->pci;
    struct dw_pcie_rp *pp = &pci->pp;
    u64 start, end;
    struct resource *mem;
    int i;

    mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
    start = mem->start;
    end = mem->end;

    /* Disable BARs for inbound access */
    ks_pcie_set_dbi_mode(ks_pcie);
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
    ks_pcie_clear_dbi_mode(ks_pcie);

    if (ks_pcie->is_am6)
        return;

    /* Window size is programmed as log2 of the size in MB */
    val = ilog2(OB_WIN_SIZE);
    ks_pcie_app_writel(ks_pcie, OB_SIZE, val);

    /* Using Direct 1:1 mapping of RC <-> PCI memory space */
    for (i = 0; i < num_viewport && (start < end); i++) {
        ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
                   lower_32_bits(start) | OB_ENABLEN);
        ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
                   upper_32_bits(start));
        start += OB_WIN_SIZE * SZ_1M;
    }

    /* Finally turn on outbound translation */
    val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
    val |= OB_XLAT_EN_VAL;
    ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
0427 
/* map_bus for buses below the root: latch the target BDF (and Type 1 for
 * buses further downstream) into CFG_SETUP, then return the shared config
 * window offset. The hardware routes the subsequent access accordingly.
 */
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
                       unsigned int devfn, int where)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
    u32 reg;

    reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
        CFG_FUNC(PCI_FUNC(devfn));
    /* Type 1 cycles for anything beyond the immediate secondary bus */
    if (!pci_is_root_bus(bus->parent))
        reg |= CFG_TYPE1;
    ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);

    return pp->va_cfg0_base + where;
}

/* Config accessors for child buses (non-AM6 only; see ks_pcie_host_init) */
static struct pci_ops ks_child_pcie_ops = {
    .map_bus = ks_pcie_other_map_bus,
    .read = pci_generic_config_read,
    .write = pci_generic_config_write,
};
0450 
/**
 * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
 * @bus: A pointer to the PCI bus structure.
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

    /* Only the root bus needs this one-time BAR0 setup */
    if (!pci_is_root_bus(bus))
        return 0;

    /* Configure and set up BAR0 */
    ks_pcie_set_dbi_mode(ks_pcie);

    /* Enable BAR0: in DBI mode these writes hit the overlaid BAR mask
     * register (enable bit, then 4K aperture mask), not the BAR itself. */
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);

    ks_pcie_clear_dbi_mode(ks_pcie);

     /*
      * For BAR0, just setting bus address for inbound writes (MSI) should
      * be sufficient.  Use physical address to avoid any conflicts.
      */
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);

    return 0;
}

/* Config accessors for the root bus */
static struct pci_ops ks_pcie_ops = {
    .map_bus = dw_pcie_own_conf_map_bus,
    .read = pci_generic_config_read,
    .write = pci_generic_config_write,
    .add_bus = ks_pcie_v3_65_add_bus,
};
0490 
0491 /**
0492  * ks_pcie_link_up() - Check if link up
0493  * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
0494  *   controller driver information.
0495  */
0496 static int ks_pcie_link_up(struct dw_pcie *pci)
0497 {
0498     u32 val;
0499 
0500     val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
0501     val &= PORT_LOGIC_LTSSM_STATE_MASK;
0502     return (val == PORT_LOGIC_LTSSM_STATE_L0);
0503 }
0504 
0505 static void ks_pcie_stop_link(struct dw_pcie *pci)
0506 {
0507     struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
0508     u32 val;
0509 
0510     /* Disable Link training */
0511     val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0512     val &= ~LTSSM_EN_VAL;
0513     ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
0514 }
0515 
0516 static int ks_pcie_start_link(struct dw_pcie *pci)
0517 {
0518     struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
0519     u32 val;
0520 
0521     /* Initiate Link Training */
0522     val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
0523     ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
0524 
0525     return 0;
0526 }
0527 
0528 static void ks_pcie_quirk(struct pci_dev *dev)
0529 {
0530     struct pci_bus *bus = dev->bus;
0531     struct pci_dev *bridge;
0532     static const struct pci_device_id rc_pci_devids[] = {
0533         { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
0534          .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
0535         { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
0536          .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
0537         { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
0538          .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
0539         { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
0540          .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
0541         { 0, },
0542     };
0543 
0544     if (pci_is_root_bus(bus))
0545         bridge = dev;
0546 
0547     /* look for the host bridge */
0548     while (!pci_is_root_bus(bus)) {
0549         bridge = bus->self;
0550         bus = bus->parent;
0551     }
0552 
0553     if (!bridge)
0554         return;
0555 
0556     /*
0557      * Keystone PCI controller has a h/w limitation of
0558      * 256 bytes maximum read request size.  It can't handle
0559      * anything higher than this.  So force this limit on
0560      * all downstream devices.
0561      */
0562     if (pci_match_id(rc_pci_devids, bridge)) {
0563         if (pcie_get_readrq(dev) > 256) {
0564             dev_info(&dev->dev, "limiting MRRS to 256\n");
0565             pcie_set_readrq(dev, 256);
0566         }
0567     }
0568 }
0569 DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
0570 
/* Chained handler for one of the 8 MSI parent lines: scan the 4 status
 * bits of this line's group register and dispatch each pending vector.
 */
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
    unsigned int irq = desc->irq_data.hwirq;
    struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
    /* Group index: parent hwirqs are contiguous from msi_host_irq */
    u32 offset = irq - ks_pcie->msi_host_irq;
    struct dw_pcie *pci = ks_pcie->pci;
    struct dw_pcie_rp *pp = &pci->pp;
    struct device *dev = pci->dev;
    struct irq_chip *chip = irq_desc_get_chip(desc);
    u32 vector, reg, pos;

    dev_dbg(dev, "%s, irq %d\n", __func__, irq);

    /*
     * The chained irq handler installation would have replaced normal
     * interrupt driver handler so we need to take care of mask/unmask and
     * ack operation.
     */
    chained_irq_enter(chip, desc);

    reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
    /*
     * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
     * shows 1, 9, 17, 25 and so forth
     */
    for (pos = 0; pos < 4; pos++) {
        if (!(reg & BIT(pos)))
            continue;

        vector = offset + (pos << 3);
        dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
        generic_handle_domain_irq(pp->irq_domain, vector);
    }

    chained_irq_exit(chip, desc);
}
0607 
/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
    unsigned int irq = irq_desc_get_irq(desc);
    struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
    struct dw_pcie *pci = ks_pcie->pci;
    struct device *dev = pci->dev;
    /* INTx index (0..3): parent IRQs are contiguous from the first one */
    u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
    struct irq_chip *chip = irq_desc_get_chip(desc);

    dev_dbg(dev, ": Handling legacy irq %d\n", irq);

    /*
     * The chained irq handler installation would have replaced normal
     * interrupt driver handler so we need to take care of mask/unmask and
     * ack operation.
     */
    chained_irq_enter(chip, desc);
    ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
    chained_irq_exit(chip, desc);
}
0635 
/* Wire up all MSI parent interrupts from the "msi-interrupt-controller"
 * child node as chained handlers, remembering the first parent's hwirq
 * as the base for group-index arithmetic in ks_pcie_msi_irq_handler().
 *
 * Returns 0 on success (also when MSI is disabled or the node is absent
 * on AM6), negative errno otherwise.
 */
static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
    struct device *dev = ks_pcie->pci->dev;
    struct device_node *np = ks_pcie->np;
    struct device_node *intc_np;
    struct irq_data *irq_data;
    int irq_count, irq, ret, i;

    if (!IS_ENABLED(CONFIG_PCI_MSI))
        return 0;

    intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
    if (!intc_np) {
        /* AM6 handles MSI differently; node is optional there */
        if (ks_pcie->is_am6)
            return 0;
        dev_warn(dev, "msi-interrupt-controller node is absent\n");
        return -EINVAL;
    }

    irq_count = of_irq_count(intc_np);
    if (!irq_count) {
        dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
        ret = -EINVAL;
        goto err;
    }

    for (i = 0; i < irq_count; i++) {
        irq = irq_of_parse_and_map(intc_np, i);
        if (!irq) {
            ret = -EINVAL;
            goto err;
        }

        /* Record the first parent hwirq as the MSI group base */
        if (!ks_pcie->msi_host_irq) {
            irq_data = irq_get_irq_data(irq);
            if (!irq_data) {
                ret = -EINVAL;
                goto err;
            }
            ks_pcie->msi_host_irq = irq_data->hwirq;
        }

        irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
                         ks_pcie);
    }

    of_node_put(intc_np);
    return 0;

err:
    of_node_put(intc_np);
    return ret;
}
0689 
/* Wire up the INTx parent interrupts from the "legacy-interrupt-controller"
 * child node, create the 4-entry legacy irq domain and enable each line.
 *
 * Returns 0 on success (also when the node is absent on AM6), negative
 * errno otherwise.
 */
static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
{
    struct device *dev = ks_pcie->pci->dev;
    struct irq_domain *legacy_irq_domain;
    struct device_node *np = ks_pcie->np;
    struct device_node *intc_np;
    int irq_count, irq, ret = 0, i;

    intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
    if (!intc_np) {
        /*
         * Since legacy interrupts are modeled as edge-interrupts in
         * AM6, keep it disabled for now.
         */
        if (ks_pcie->is_am6)
            return 0;
        dev_warn(dev, "legacy-interrupt-controller node is absent\n");
        return -EINVAL;
    }

    irq_count = of_irq_count(intc_np);
    if (!irq_count) {
        dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
        ret = -EINVAL;
        goto err;
    }

    for (i = 0; i < irq_count; i++) {
        irq = irq_of_parse_and_map(intc_np, i);
        if (!irq) {
            ret = -EINVAL;
            goto err;
        }
        ks_pcie->legacy_host_irqs[i] = irq;

        irq_set_chained_handler_and_data(irq,
                         ks_pcie_legacy_irq_handler,
                         ks_pcie);
    }

    legacy_irq_domain =
        irq_domain_add_linear(intc_np, PCI_NUM_INTX,
                      &ks_pcie_legacy_irq_domain_ops, NULL);
    if (!legacy_irq_domain) {
        dev_err(dev, "Failed to add irq domain for legacy irqs\n");
        ret = -EINVAL;
        goto err;
    }
    ks_pcie->legacy_irq_domain = legacy_irq_domain;

    for (i = 0; i < PCI_NUM_INTX; i++)
        ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);

/* NB: the success path also falls through here with ret == 0 */
err:
    of_node_put(intc_np);
    return ret;
}
0747 
#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, keystone host
 * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
 * This handler always returns 0 for this kind of fault.
 */
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
             struct pt_regs *regs)
{
    unsigned long instr = *(unsigned long *) instruction_pointer(regs);

    /* If the faulting instruction matches this load pattern, fake an
     * all-ones result in its destination register and skip over it. */
    if ((instr & 0x0e100090) == 0x00100090) {
        int reg = (instr >> 12) & 15;

        regs->uregs[reg] = -1;
        regs->ARM_pc += 4;
    }

    return 0;
}
#endif
0769 
/* Read the SoC-provided vendor/device ID from the "ti,syscon-pcie-id"
 * syscon (optionally at a DT-supplied register offset) and program it
 * into the config-space ID registers via a read-only-unlock window.
 */
static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
    int ret;
    unsigned int id;
    struct regmap *devctrl_regs;
    struct dw_pcie *pci = ks_pcie->pci;
    struct device *dev = pci->dev;
    struct device_node *np = dev->of_node;
    struct of_phandle_args args;
    unsigned int offset = 0;

    devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
    if (IS_ERR(devctrl_regs))
        return PTR_ERR(devctrl_regs);

    /* Do not error out to maintain old DT compatibility */
    ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
    if (!ret)
        offset = args.args[0];

    ret = regmap_read(devctrl_regs, offset, &id);
    if (ret)
        return ret;

    /* ID registers are read-only; briefly unlock them for the update */
    dw_pcie_dbi_ro_wr_en(pci);
    dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
    dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
    dw_pcie_dbi_ro_wr_dis(pci);

    return 0;
}
0801 
/* DesignWare host_init callback: install pci_ops, configure INTx/MSI
 * chained interrupts, program application registers and the config-space
 * IDs, and (on ARM) hook the external-abort fault handler.
 */
static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
    int ret;

    pp->bridge->ops = &ks_pcie_ops;
    /* AM6 uses the default DWC child config accessors */
    if (!ks_pcie->is_am6)
        pp->bridge->child_ops = &ks_child_pcie_ops;

    ret = ks_pcie_config_legacy_irq(ks_pcie);
    if (ret)
        return ret;

    ret = ks_pcie_config_msi_irq(ks_pcie);
    if (ret)
        return ret;

    /* Hold the link down while application registers are programmed */
    ks_pcie_stop_link(pci);
    ks_pcie_setup_rc_app_regs(ks_pcie);
    /* Advertise 32-bit I/O addressing in the bridge I/O base/limit */
    writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
            pci->dbi_base + PCI_IO_BASE);

    ret = ks_pcie_init_id(ks_pcie);
    if (ret < 0)
        return ret;

#ifdef CONFIG_ARM
    /*
     * PCIe access errors that result into OCP errors are caught by ARM as
     * "External aborts"
     */
    hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
            "Asynchronous external abort");
#endif

    return 0;
}
0840 
/* Keystone 2 host ops: custom MSI domain via msi_host_init */
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
    .host_init = ks_pcie_host_init,
    .msi_host_init = ks_pcie_msi_host_init,
};

/* AM654 host ops: default DWC MSI handling, shared host_init */
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
    .host_init = ks_pcie_host_init,
};
0849 
0850 static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
0851 {
0852     struct keystone_pcie *ks_pcie = priv;
0853 
0854     return ks_pcie_handle_error_irq(ks_pcie);
0855 }
0856 
/* DBI2 write accessor: DBI2 registers are reached through the normal DBI
 * window while the DBI_CS2 mode bit is set, so bracket the write with the
 * set/clear helpers.
 */
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
                     u32 reg, size_t size, u32 val)
{
    struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

    ks_pcie_set_dbi_mode(ks_pcie);
    dw_pcie_write(base + reg, size, val);
    ks_pcie_clear_dbi_mode(ks_pcie);
}

/* Low-level DesignWare callbacks shared by all supported SoCs */
static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
    .start_link = ks_pcie_start_link,
    .stop_link = ks_pcie_stop_link,
    .link_up = ks_pcie_link_up,
    .write_dbi2 = ks_pcie_am654_write_dbi2,
};
0873 
/* AM654 endpoint init: set the ATU page size and program BAR0 as a
 * 32-bit memory BAR sized (via the DBI2 mask write) to the application
 * address space.
 */
static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
{
    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
    int flags;

    ep->page_size = AM654_WIN_SIZE;
    flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
    dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}
0884 
/* Pulse a legacy (INTx) interrupt towards the host: enable the line for
 * the configured interrupt pin, assert it for ~1ms, then deassert and
 * disable again. Pins outside 1..4 are silently ignored.
 */
static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
{
    struct dw_pcie *pci = ks_pcie->pci;
    u8 int_pin;

    int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
    if (int_pin == 0 || int_pin > 4)
        return;

    ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
               INT_ENABLE);
    ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
    /* Keep the line asserted long enough for the host to latch it */
    mdelay(1);
    ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
    ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
               INT_ENABLE);
}
0902 
/* EPC raise_irq callback: dispatch to the legacy pulse helper or the
 * generic DWC MSI/MSI-X raisers. Returns -EINVAL for unknown types.
 */
static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
                   enum pci_epc_irq_type type,
                   u16 interrupt_num)
{
    struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
    struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);

    switch (type) {
    case PCI_EPC_IRQ_LEGACY:
        ks_pcie_am654_raise_legacy_irq(ks_pcie);
        break;
    case PCI_EPC_IRQ_MSI:
        dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
        break;
    case PCI_EPC_IRQ_MSIX:
        dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
        break;
    default:
        dev_err(pci->dev, "UNKNOWN IRQ type\n");
        return -EINVAL;
    }

    return 0;
}
0927 
/* Endpoint controller capabilities for AM654: BAR0/BAR1 are reserved for
 * internal use, BAR0 is fixed 64-bit, and BARs 2..5 have fixed sizes.
 */
static const struct pci_epc_features ks_pcie_am654_epc_features = {
    .linkup_notifier = false,
    .msi_capable = true,
    .msix_capable = true,
    .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
    .bar_fixed_64bit = 1 << BAR_0,
    .bar_fixed_size[2] = SZ_1M,
    .bar_fixed_size[3] = SZ_64K,
    .bar_fixed_size[4] = 256,
    .bar_fixed_size[5] = SZ_1M,
    .align = SZ_1M,
};

static const struct pci_epc_features*
ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
{
    return &ks_pcie_am654_epc_features;
}

/* DesignWare endpoint callbacks for AM654 */
static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
    .ep_init = ks_pcie_am654_ep_init,
    .raise_irq = ks_pcie_am654_raise_irq,
    .get_features = &ks_pcie_am654_get_features,
};
0952 
0953 static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
0954 {
0955     int num_lanes = ks_pcie->num_lanes;
0956 
0957     while (num_lanes--) {
0958         phy_power_off(ks_pcie->phy[num_lanes]);
0959         phy_exit(ks_pcie->phy[num_lanes]);
0960     }
0961 }
0962 
0963 static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
0964 {
0965     int i;
0966     int ret;
0967     int num_lanes = ks_pcie->num_lanes;
0968 
0969     for (i = 0; i < num_lanes; i++) {
0970         ret = phy_reset(ks_pcie->phy[i]);
0971         if (ret < 0)
0972             goto err_phy;
0973 
0974         ret = phy_init(ks_pcie->phy[i]);
0975         if (ret < 0)
0976             goto err_phy;
0977 
0978         ret = phy_power_on(ks_pcie->phy[i]);
0979         if (ret < 0) {
0980             phy_exit(ks_pcie->phy[i]);
0981             goto err_phy;
0982         }
0983     }
0984 
0985     return 0;
0986 
0987 err_phy:
0988     while (--i >= 0) {
0989         phy_power_off(ks_pcie->phy[i]);
0990         phy_exit(ks_pcie->phy[i]);
0991     }
0992 
0993     return ret;
0994 }
0995 
0996 static int ks_pcie_set_mode(struct device *dev)
0997 {
0998     struct device_node *np = dev->of_node;
0999     struct of_phandle_args args;
1000     unsigned int offset = 0;
1001     struct regmap *syscon;
1002     u32 val;
1003     u32 mask;
1004     int ret = 0;
1005 
1006     syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1007     if (IS_ERR(syscon))
1008         return 0;
1009 
1010     /* Do not error out to maintain old DT compatibility */
1011     ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1012     if (!ret)
1013         offset = args.args[0];
1014 
1015     mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
1016     val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
1017 
1018     ret = regmap_update_bits(syscon, offset, mask, val);
1019     if (ret) {
1020         dev_err(dev, "failed to set pcie mode\n");
1021         return ret;
1022     }
1023 
1024     return 0;
1025 }
1026 
1027 static int ks_pcie_am654_set_mode(struct device *dev,
1028                   enum dw_pcie_device_mode mode)
1029 {
1030     struct device_node *np = dev->of_node;
1031     struct of_phandle_args args;
1032     unsigned int offset = 0;
1033     struct regmap *syscon;
1034     u32 val;
1035     u32 mask;
1036     int ret = 0;
1037 
1038     syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
1039     if (IS_ERR(syscon))
1040         return 0;
1041 
1042     /* Do not error out to maintain old DT compatibility */
1043     ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
1044     if (!ret)
1045         offset = args.args[0];
1046 
1047     mask = AM654_PCIE_DEV_TYPE_MASK;
1048 
1049     switch (mode) {
1050     case DW_PCIE_RC_TYPE:
1051         val = RC;
1052         break;
1053     case DW_PCIE_EP_TYPE:
1054         val = EP;
1055         break;
1056     default:
1057         dev_err(dev, "INVALID device type %d\n", mode);
1058         return -EINVAL;
1059     }
1060 
1061     ret = regmap_update_bits(syscon, offset, mask, val);
1062     if (ret) {
1063         dev_err(dev, "failed to set pcie mode\n");
1064         return ret;
1065     }
1066 
1067     return 0;
1068 }
1069 
1070 static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
1071     .host_ops = &ks_pcie_host_ops,
1072     .version = DW_PCIE_VER_365A,
1073 };
1074 
1075 static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
1076     .host_ops = &ks_pcie_am654_host_ops,
1077     .mode = DW_PCIE_RC_TYPE,
1078     .version = DW_PCIE_VER_490A,
1079 };
1080 
1081 static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
1082     .ep_ops = &ks_pcie_am654_ep_ops,
1083     .mode = DW_PCIE_EP_TYPE,
1084     .version = DW_PCIE_VER_490A,
1085 };
1086 
/* DT match table: legacy Keystone 2 plus the AM654 RC and EP variants. */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		/* Legacy binding also requires device_type = "pci". */
		.type = "pci",
		.data = &ks_pcie_rc_of_data,
		.compatible = "ti,keystone-pcie",
	},
	{
		.data = &ks_pcie_am654_rc_of_data,
		.compatible = "ti,am654-pcie-rc",
	},
	{
		.data = &ks_pcie_am654_ep_of_data,
		.compatible = "ti,am654-pcie-ep",
	},
	{ },
};
1103 
/*
 * ks_pcie_probe() - bind a Keystone/AM654 PCIe controller
 *
 * Match data selects the DWC IP version, host/endpoint ops and the
 * operating mode (RC or EP). The probe maps the TI application ("app")
 * and DesignWare DBI ("dbics") register spaces, requests the error IRQ,
 * brings up one optional PHY per lane, programs the device mode via
 * syscon, then hands control to the DesignWare host or endpoint core.
 */
static int __init ks_pcie_probe(struct platform_device *pdev)
{
	const struct dw_pcie_host_ops *host_ops;
	const struct dw_pcie_ep_ops *ep_ops;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct ks_pcie_of_data *data;
	enum dw_pcie_device_mode mode;
	struct dw_pcie *pci;
	struct keystone_pcie *ks_pcie;
	struct device_link **link;
	struct gpio_desc *gpiod;
	struct resource *res;
	void __iomem *base;
	u32 num_viewport;
	struct phy **phy;
	u32 num_lanes;
	char name[10];
	u32 version;
	int ret;
	int irq;
	int i;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	version = data->version;
	host_ops = data->host_ops;
	ep_ops = data->ep_ops;
	mode = data->mode;

	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
	if (!ks_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	/* TI wrapper ("application") registers. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
	ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* Keep a copy of the resource itself, not just the mapping. */
	ks_pcie->app = *res;

	/* DesignWare DBI (config-space) registers. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
	base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
		ks_pcie->is_am6 = true;

	/* Both DBI views point at the same mapping here. */
	pci->dbi_base = base;
	pci->dbi_base2 = base;
	pci->dev = dev;
	pci->ops = &ks_pcie_dw_pcie_ops;
	pci->version = version;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/*
	 * NOTE(review): this IRQ is not released on the error paths below
	 * nor in ks_pcie_remove() — consider devm_request_irq().
	 */
	ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
			  "ks-pcie-error-irq", ks_pcie);
	if (ret < 0) {
		dev_err(dev, "failed to request error IRQ %d\n",
			irq);
		return ret;
	}

	/* Default to a single lane when the DT omits "num-lanes". */
	ret = of_property_read_u32(np, "num-lanes", &num_lanes);
	if (ret)
		num_lanes = 1;

	phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/*
	 * One optional PHY per lane; an absent "pcie-phyN" yields NULL
	 * and the lane is skipped (no device link is created for it).
	 */
	for (i = 0; i < num_lanes; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_optional_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_link;
		}

		if (!phy[i])
			continue;

		/* Make the controller a consumer of its lane PHY. */
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	ks_pcie->np = np;
	ks_pcie->pci = pci;
	ks_pcie->link = link;
	ks_pcie->num_lanes = num_lanes;
	ks_pcie->phy = phy;

	/* Optional PERST# GPIO, requested with initial logical value 0. */
	gpiod = devm_gpiod_get_optional(dev, "reset",
					GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		ret = PTR_ERR(gpiod);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset GPIO\n");
		goto err_link;
	}

	ret = ks_pcie_enable_phy(ks_pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		goto err_link;
	}

	platform_set_drvdata(pdev, ks_pcie);
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	/* DWC revisions >= 4.80a use the AM654-style syscon mode layout. */
	if (dw_pcie_ver_is_ge(pci, 480A))
		ret = ks_pcie_am654_set_mode(dev, mode);
	else
		ret = ks_pcie_set_mode(dev);
	if (ret < 0)
		goto err_get_sync;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		ret = of_property_read_u32(np, "num-viewport", &num_viewport);
		if (ret < 0) {
			dev_err(dev, "unable to read *num-viewport* property\n");
			goto err_get_sync;
		}

		/*
		 * "Power Sequencing and Reset Signal Timings" table in
		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
		 * indicates PERST# should be deasserted after minimum of 100us
		 * once REFCLK is stable. The REFCLK to the connector in RC
		 * mode is selected while enabling the PHY. So deassert PERST#
		 * after 100 us.
		 */
		if (gpiod) {
			usleep_range(100, 200);
			gpiod_set_value_cansleep(gpiod, 1);
		}

		ks_pcie->num_viewport = num_viewport;
		pci->pp.ops = host_ops;
		ret = dw_pcie_host_init(&pci->pp);
		if (ret < 0)
			goto err_get_sync;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
			ret = -ENODEV;
			goto err_get_sync;
		}

		pci->ep.ops = ep_ops;
		ret = dw_pcie_ep_init(&pci->ep);
		if (ret < 0)
			goto err_get_sync;
		break;
	default:
		/*
		 * NOTE(review): an unknown mode is only logged; probe still
		 * returns 0 with neither host nor endpoint initialized.
		 */
		dev_err(dev, "INVALID device type %d\n", mode);
	}

	ks_pcie_enable_error_irq(ks_pcie);

	return 0;

err_get_sync:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	ks_pcie_disable_phy(ks_pcie);

err_link:
	/*
	 * NOTE(review): this unwind stops at the first NULL entry, so links
	 * created below a skipped (PHY-less) lane would leak — verify that
	 * populated lanes are contiguous.
	 */
	while (--i >= 0 && link[i])
		device_link_del(link[i]);

	return ret;
}
1305 
1306 static int __exit ks_pcie_remove(struct platform_device *pdev)
1307 {
1308     struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
1309     struct device_link **link = ks_pcie->link;
1310     int num_lanes = ks_pcie->num_lanes;
1311     struct device *dev = &pdev->dev;
1312 
1313     pm_runtime_put(dev);
1314     pm_runtime_disable(dev);
1315     ks_pcie_disable_phy(ks_pcie);
1316     while (num_lanes--)
1317         device_link_del(link[num_lanes]);
1318 
1319     return 0;
1320 }
1321 
/*
 * Builtin-only platform driver (builtin_platform_driver, no module
 * exit). __refdata suppresses section-mismatch warnings for the
 * references to the __init probe and __exit remove routines.
 * NOTE(review): this relies on probe only running during boot —
 * confirm late (sysfs-triggered) binds are not a concern here.
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = ks_pcie_of_match,
	},
};
builtin_platform_driver(ks_pcie_driver);