/* (non-source web-viewer navigation text removed) */

0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
0004  *
0005  * (C) Copyright 2019 - 2020, Xilinx, Inc.
0006  */
0007 
0008 #include <linux/bitfield.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/irq.h>
0011 #include <linux/irqchip.h>
0012 #include <linux/irqchip/chained_irq.h>
0013 #include <linux/irqdomain.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/of_address.h>
0017 #include <linux/of_pci.h>
0018 #include <linux/of_platform.h>
0019 #include <linux/of_irq.h>
0020 #include <linux/pci.h>
0021 #include <linux/platform_device.h>
0022 #include <linux/pci-ecam.h>
0023 
0024 #include "../pci.h"
0025 
/* Register definitions (offsets within the bridge register space) */
#define XILINX_CPM_PCIE_REG_IDR     0x00000E10
#define XILINX_CPM_PCIE_REG_IMR     0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR    0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC    0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR   0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN    0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK   0x00000E3C
/* The MISC_IR_* registers live in the CPM SLCR block (cpm_base) */
#define XILINX_CPM_PCIE_MISC_IR_STATUS  0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE  0x00000348
#define XILINX_CPM_PCIE_MISC_IR_LOCAL   BIT(1)

/* IR_* registers are only touched on CPM5 (see event_flow/init_port) */
#define XILINX_CPM_PCIE_IR_STATUS       0x000002A0
#define XILINX_CPM_PCIE_IR_ENABLE       0x000002A8
#define XILINX_CPM_PCIE_IR_LOCAL        BIT(0)

/* Interrupt registers definitions: hwirq numbers of the bridge events */
#define XILINX_CPM_PCIE_INTR_LINK_DOWN      0
#define XILINX_CPM_PCIE_INTR_HOT_RESET      3
#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT   4
#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT    8
#define XILINX_CPM_PCIE_INTR_CORRECTABLE    9
#define XILINX_CPM_PCIE_INTR_NONFATAL       10
#define XILINX_CPM_PCIE_INTR_FATAL      11
#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD    15
#define XILINX_CPM_PCIE_INTR_INTX       16
#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD    17
#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP     20
#define XILINX_CPM_PCIE_INTR_SLV_UNEXP      21
#define XILINX_CPM_PCIE_INTR_SLV_COMPL      22
#define XILINX_CPM_PCIE_INTR_SLV_ERRP       23
#define XILINX_CPM_PCIE_INTR_SLV_CMPABT     24
#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR     25
#define XILINX_CPM_PCIE_INTR_MST_DECERR     26
#define XILINX_CPM_PCIE_INTR_MST_SLVERR     27
#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT   28

/* Convenience: turn an INTR_* name into its IMR/IDR bit */
#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK            \
    (                       \
        IMR(LINK_DOWN)      |       \
        IMR(HOT_RESET)      |       \
        IMR(CFG_PCIE_TIMEOUT)   |       \
        IMR(CFG_TIMEOUT)    |       \
        IMR(CORRECTABLE)    |       \
        IMR(NONFATAL)       |       \
        IMR(FATAL)      |       \
        IMR(CFG_ERR_POISON) |       \
        IMR(PME_TO_ACK_RCVD)    |       \
        IMR(INTX)       |       \
        IMR(PM_PME_RCVD)    |       \
        IMR(SLV_UNSUPP)     |       \
        IMR(SLV_UNEXP)      |       \
        IMR(SLV_COMPL)      |       \
        IMR(SLV_ERRP)       |       \
        IMR(SLV_CMPABT)     |       \
        IMR(SLV_ILLBUR)     |       \
        IMR(MST_DECERR)     |       \
        IMR(MST_SLVERR)     |       \
        IMR(SLV_PCIE_TIMEOUT)           \
    )

#define XILINX_CPM_PCIE_IDR_ALL_MASK        0xFFFFFFFF
/* IDRN bits 19:16 carry one pending bit per INTx line */
#define XILINX_CPM_PCIE_IDRN_MASK       GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT      16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID     BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID        GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK      0xFFFFFFFF

/* Root Port Status/control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN        BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP      BIT(11)
0104 
/* Supported CPM hardware generations */
enum xilinx_cpm_version {
    CPM,
    CPM5,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 */
struct xilinx_cpm_variant {
    enum xilinx_cpm_version version;
};
0117 
/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge Register Base (for CPM this aliases @cfg->win, for
 *            CPM5 it is a separately mapped "cpm_csr" region)
 * @cpm_base: CPM System Level Control and Status Register(SLCR) Base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer (32 bridge events)
 * @cfg: Holds mappings of config space window
 * @intx_irq: legacy interrupt number
 * @irq: Error interrupt number
 * @lock: lock protecting shared register access (IDRN_MASK/IMR RMW)
 * @variant: CPM variant specific data
 */
struct xilinx_cpm_pcie {
    struct device           *dev;
    void __iomem            *reg_base;
    void __iomem            *cpm_base;
    struct irq_domain       *intx_domain;
    struct irq_domain       *cpm_domain;
    struct pci_config_window    *cfg;
    int             intx_irq;
    int             irq;
    raw_spinlock_t          lock;
    const struct xilinx_cpm_variant   *variant;
};
0143 
0144 static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
0145 {
0146     return readl_relaxed(port->reg_base + reg);
0147 }
0148 
0149 static void pcie_write(struct xilinx_cpm_pcie *port,
0150                u32 val, u32 reg)
0151 {
0152     writel_relaxed(val, port->reg_base + reg);
0153 }
0154 
0155 static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
0156 {
0157     return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
0158         XILINX_CPM_PCIE_REG_PSCR_LNKUP);
0159 }
0160 
0161 static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
0162 {
0163     unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
0164 
0165     if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
0166         dev_dbg(port->dev, "Requester ID %lu\n",
0167             val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
0168         pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
0169                XILINX_CPM_PCIE_REG_RPEFR);
0170     }
0171 }
0172 
0173 static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
0174 {
0175     struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
0176     unsigned long flags;
0177     u32 mask;
0178     u32 val;
0179 
0180     mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
0181     raw_spin_lock_irqsave(&port->lock, flags);
0182     val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
0183     pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
0184     raw_spin_unlock_irqrestore(&port->lock, flags);
0185 }
0186 
0187 static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
0188 {
0189     struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
0190     unsigned long flags;
0191     u32 mask;
0192     u32 val;
0193 
0194     mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
0195     raw_spin_lock_irqsave(&port->lock, flags);
0196     val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
0197     pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
0198     raw_spin_unlock_irqrestore(&port->lock, flags);
0199 }
0200 
/* irq_chip for the four legacy INTx interrupts */
static struct irq_chip xilinx_cpm_leg_irq_chip = {
    .name       = "INTx",
    .irq_mask   = xilinx_cpm_mask_leg_irq,
    .irq_unmask = xilinx_cpm_unmask_leg_irq,
};
0206 
0207 /**
0208  * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
0209  * @domain: IRQ domain
0210  * @irq: Virtual IRQ number
0211  * @hwirq: HW interrupt number
0212  *
0213  * Return: Always returns 0.
0214  */
0215 static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
0216                     unsigned int irq, irq_hw_number_t hwirq)
0217 {
0218     irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
0219                  handle_level_irq);
0220     irq_set_chip_data(irq, domain->host_data);
0221     irq_set_status_flags(irq, IRQ_LEVEL);
0222 
0223     return 0;
0224 }
0225 
/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
    .map = xilinx_cpm_pcie_intx_map,
};
0230 
0231 static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
0232 {
0233     struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
0234     struct irq_chip *chip = irq_desc_get_chip(desc);
0235     unsigned long val;
0236     int i;
0237 
0238     chained_irq_enter(chip, desc);
0239 
0240     val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
0241             pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));
0242 
0243     for_each_set_bit(i, &val, PCI_NUM_INTX)
0244         generic_handle_domain_irq(port->intx_domain, i);
0245 
0246     chained_irq_exit(chip, desc);
0247 }
0248 
0249 static void xilinx_cpm_mask_event_irq(struct irq_data *d)
0250 {
0251     struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
0252     u32 val;
0253 
0254     raw_spin_lock(&port->lock);
0255     val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
0256     val &= ~BIT(d->hwirq);
0257     pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
0258     raw_spin_unlock(&port->lock);
0259 }
0260 
0261 static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
0262 {
0263     struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
0264     u32 val;
0265 
0266     raw_spin_lock(&port->lock);
0267     val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
0268     val |= BIT(d->hwirq);
0269     pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
0270     raw_spin_unlock(&port->lock);
0271 }
0272 
/* irq_chip for the 32 bridge error/event interrupts */
static struct irq_chip xilinx_cpm_event_irq_chip = {
    .name       = "RC-Event",
    .irq_mask   = xilinx_cpm_mask_event_irq,
    .irq_unmask = xilinx_cpm_unmask_event_irq,
};
0278 
0279 static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
0280                      unsigned int irq, irq_hw_number_t hwirq)
0281 {
0282     irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
0283                  handle_level_irq);
0284     irq_set_chip_data(irq, domain->host_data);
0285     irq_set_status_flags(irq, IRQ_LEVEL);
0286     return 0;
0287 }
0288 
/* Event IRQ Domain operations */
static const struct irq_domain_ops event_domain_ops = {
    .map = xilinx_cpm_pcie_event_map,
};
0292 
/*
 * Chained handler for the main bridge interrupt: dispatch each enabled,
 * pending event into the event domain, then acknowledge the status
 * registers.  The ack ordering (IDR, then CPM5 IR_STATUS, then the SLCR
 * MISC_IR_STATUS) follows the hardware hierarchy — keep it as-is.
 */
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
    struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
    struct irq_chip *chip = irq_desc_get_chip(desc);
    unsigned long val;
    int i;

    chained_irq_enter(chip, desc);
    /* Handle only events that are both pending (IDR) and enabled (IMR) */
    val =  pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
    val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
    for_each_set_bit(i, &val, 32)
        generic_handle_domain_irq(port->cpm_domain, i);
    /* Ack the handled events (IDR presumably write-one-to-clear) */
    pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

    if (port->variant->version == CPM5) {
        val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
        if (val)
            writel_relaxed(val, port->cpm_base +
                        XILINX_CPM_PCIE_IR_STATUS);
    }

    /*
     * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
     * CPM SLCR block.
     */
    val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
    if (val)
        writel_relaxed(val,
               port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

    chained_irq_exit(chip, desc);
}
0325 
/* Build one intr_cause entry: symbolic name + human-readable string */
#define _IC(x, s)                              \
    [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }

/*
 * Per-event names, indexed by hwirq.  Entries left empty (NULL .str) are
 * skipped by xilinx_cpm_setup_irq() and never get a dedicated handler.
 */
static const struct {
    const char      *sym;
    const char      *str;
} intr_cause[32] = {
    _IC(LINK_DOWN,      "Link Down"),
    _IC(HOT_RESET,      "Hot reset"),
    _IC(CFG_TIMEOUT,    "ECAM access timeout"),
    _IC(CORRECTABLE,    "Correctable error message"),
    _IC(NONFATAL,       "Non fatal error message"),
    _IC(FATAL,      "Fatal error message"),
    _IC(SLV_UNSUPP,     "Slave unsupported request"),
    _IC(SLV_UNEXP,      "Slave unexpected completion"),
    _IC(SLV_COMPL,      "Slave completion timeout"),
    _IC(SLV_ERRP,       "Slave Error Poison"),
    _IC(SLV_CMPABT,     "Slave Completer Abort"),
    _IC(SLV_ILLBUR,     "Slave Illegal Burst"),
    _IC(MST_DECERR,     "Master decode error"),
    _IC(MST_SLVERR,     "Master slave error"),
    _IC(CFG_PCIE_TIMEOUT,   "PCIe ECAM access timeout"),
    _IC(CFG_ERR_POISON, "ECAM poisoned completion received"),
    _IC(PME_TO_ACK_RCVD,    "PME_TO_ACK message received"),
    _IC(PM_PME_RCVD,    "PM_PME message received"),
    _IC(SLV_PCIE_TIMEOUT,   "PCIe completion timeout received"),
};
0353 
/**
 * xilinx_cpm_pcie_intr_handler - Handler for one Root Complex event IRQ
 * @irq: Virtual IRQ number
 * @dev_id: Pointer to the struct xilinx_cpm_pcie passed at request time
 *
 * Logs the event; the error-message events additionally drain the Root
 * Port Error FIFO first.
 *
 * Return: Always IRQ_HANDLED.
 */
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
    struct xilinx_cpm_pcie *port = dev_id;
    struct device *dev = port->dev;
    struct irq_data *d;

    /* Recover the hardware event number from the virtual IRQ */
    d = irq_domain_get_irq_data(port->cpm_domain, irq);

    switch (d->hwirq) {
    case XILINX_CPM_PCIE_INTR_CORRECTABLE:
    case XILINX_CPM_PCIE_INTR_NONFATAL:
    case XILINX_CPM_PCIE_INTR_FATAL:
        cpm_pcie_clear_err_interrupts(port);
        fallthrough;

    default:
        if (intr_cause[d->hwirq].str)
            dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
        else
            dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
    }

    return IRQ_HANDLED;
}
0378 
0379 static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
0380 {
0381     if (port->intx_domain) {
0382         irq_domain_remove(port->intx_domain);
0383         port->intx_domain = NULL;
0384     }
0385 
0386     if (port->cpm_domain) {
0387         irq_domain_remove(port->cpm_domain);
0388         port->cpm_domain = NULL;
0389     }
0390 }
0391 
/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Creates the 32-event domain and the INTx domain on the first child
 * (interrupt controller) node of the port.  On failure both domains are
 * torn down and the node reference is dropped.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
    struct device *dev = port->dev;
    struct device_node *node = dev->of_node;
    struct device_node *pcie_intc_node;

    /* Setup INTx */
    pcie_intc_node = of_get_next_child(node, NULL);
    if (!pcie_intc_node) {
        dev_err(dev, "No PCIe Intc node found\n");
        return -EINVAL;
    }

    /* One linear domain covering all 32 bridge event bits */
    port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
                         &event_domain_ops,
                         port);
    if (!port->cpm_domain)
        goto out;

    irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

    /* Separate domain for the four legacy INTx lines */
    port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                          &intx_domain_ops,
                          port);
    if (!port->intx_domain)
        goto out;

    irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

    of_node_put(pcie_intc_node);
    raw_spin_lock_init(&port->lock);

    return 0;
out:
    /* Removes whichever domain(s) were created; NULL-safe */
    xilinx_cpm_free_irq_domains(port);
    of_node_put(pcie_intc_node);
    dev_err(dev, "Failed to allocate IRQ domains\n");

    return -ENOMEM;
}
0438 
/**
 * xilinx_cpm_setup_irq - Request event IRQs and install chained handlers
 * @port: PCIe port information
 *
 * Maps every named bridge event to a virtual IRQ with a dedicated
 * handler, then plugs the chained flows for the aggregated INTx event
 * and for the port's main platform interrupt.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
    struct device *dev = port->dev;
    struct platform_device *pdev = to_platform_device(dev);
    int i, irq;

    port->irq = platform_get_irq(pdev, 0);
    if (port->irq < 0)
        return port->irq;

    for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
        int err;

        /* Events without a name string get no dedicated handler */
        if (!intr_cause[i].str)
            continue;

        irq = irq_create_mapping(port->cpm_domain, i);
        if (!irq) {
            dev_err(dev, "Failed to map interrupt\n");
            return -ENXIO;
        }

        err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
                       0, intr_cause[i].sym, port);
        if (err) {
            dev_err(dev, "Failed to request IRQ %d\n", irq);
            return err;
        }
    }

    port->intx_irq = irq_create_mapping(port->cpm_domain,
                        XILINX_CPM_PCIE_INTR_INTX);
    if (!port->intx_irq) {
        dev_err(dev, "Failed to map INTx interrupt\n");
        return -ENXIO;
    }

    /* Plug the INTx chained handler */
    irq_set_chained_handler_and_data(port->intx_irq,
                     xilinx_cpm_pcie_intx_flow, port);

    /* Plug the main event chained handler */
    irq_set_chained_handler_and_data(port->irq,
                     xilinx_cpm_pcie_event_flow, port);

    return 0;
}
0486 
/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 *
 * Quiesces and acknowledges all interrupts, enables the local interrupt
 * routing in the SLCR (and the CPM5 interrupt router where present),
 * then turns on the bridge.  The sequence is order-sensitive.
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
    if (cpm_pcie_link_up(port))
        dev_info(port->dev, "PCIe Link is UP\n");
    else
        dev_info(port->dev, "PCIe Link is DOWN\n");

    /* Disable all interrupts */
    pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
           XILINX_CPM_PCIE_REG_IMR);

    /* Clear pending interrupts */
    pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
           XILINX_CPM_PCIE_IMR_ALL_MASK,
           XILINX_CPM_PCIE_REG_IDR);

    /*
     * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
     * CPM SLCR block.
     */
    writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
           port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

    /* CPM5 has an additional interrupt router to enable */
    if (port->variant->version == CPM5) {
        writel(XILINX_CPM_PCIE_IR_LOCAL,
               port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
    }

    /* Enable the Bridge enable bit */
    pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
           XILINX_CPM_PCIE_REG_RPSC_BEN,
           XILINX_CPM_PCIE_REG_RPSC);
}
0524 
/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Maps the SLCR ("cpm_slcr") region, creates the ECAM config window from
 * the "cfg" resource, and resolves the bridge register base: a dedicated
 * "cpm_csr" region on CPM5, otherwise the start of the ECAM window.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
                    struct resource *bus_range)
{
    struct device *dev = port->dev;
    struct platform_device *pdev = to_platform_device(dev);
    struct resource *res;

    port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
                                   "cpm_slcr");
    if (IS_ERR(port->cpm_base))
        return PTR_ERR(port->cpm_base);

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
    if (!res)
        return -ENXIO;

    port->cfg = pci_ecam_create(dev, res, bus_range,
                    &pci_generic_ecam_ops);
    if (IS_ERR(port->cfg))
        return PTR_ERR(port->cfg);

    if (port->variant->version == CPM5) {
        port->reg_base = devm_platform_ioremap_resource_byname(pdev,
                                    "cpm_csr");
        if (IS_ERR(port->reg_base))
            return PTR_ERR(port->reg_base);
    } else {
        /* On CPM the bridge registers alias the ECAM mapping */
        port->reg_base = port->cfg->win;
    }

    return 0;
}
0564 
/* Detach the chained handlers installed by xilinx_cpm_setup_irq() */
static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
    irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
    irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}
0570 
/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Allocates the host bridge, builds the IRQ domains, maps the hardware,
 * initializes the port, wires the interrupts, and registers the bridge.
 * Error paths unwind in reverse order via goto labels.
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
    struct xilinx_cpm_pcie *port;
    struct device *dev = &pdev->dev;
    struct pci_host_bridge *bridge;
    struct resource_entry *bus;
    int err;

    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
    if (!bridge)
        return -ENODEV;

    port = pci_host_bridge_priv(bridge);

    port->dev = dev;

    err = xilinx_cpm_pcie_init_irq_domain(port);
    if (err)
        return err;

    /* The bus-number window parsed into the bridge by the PCI core */
    bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
    if (!bus)
        return -ENODEV;

    port->variant = of_device_get_match_data(dev);

    err = xilinx_cpm_pcie_parse_dt(port, bus->res);
    if (err) {
        dev_err(dev, "Parsing DT failed\n");
        goto err_parse_dt;
    }

    xilinx_cpm_pcie_init_port(port);

    err = xilinx_cpm_setup_irq(port);
    if (err) {
        dev_err(dev, "Failed to set up interrupts\n");
        goto err_setup_irq;
    }

    bridge->sysdata = port->cfg;
    bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

    err = pci_host_probe(bridge);
    if (err < 0)
        goto err_host_bridge;

    return 0;

err_host_bridge:
    xilinx_cpm_free_interrupts(port);
err_setup_irq:
    pci_ecam_free(port->cfg);
err_parse_dt:
    xilinx_cpm_free_irq_domains(port);
    return err;
}
0634 
/* Variant data referenced from xilinx_cpm_pcie_of_match */
static const struct xilinx_cpm_variant cpm_host = {
    .version = CPM,
};

static const struct xilinx_cpm_variant cpm5_host = {
    .version = CPM5,
};
0642 
/* Devicetree compatibles and their variant data */
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
    {
        .compatible = "xlnx,versal-cpm-host-1.00",
        .data = &cpm_host,
    },
    {
        .compatible = "xlnx,versal-cpm5-host",
        .data = &cpm5_host,
    },
    {}
};
0654 
static struct platform_driver xilinx_cpm_pcie_driver = {
    .driver = {
        .name = "xilinx-cpm-pcie",
        .of_match_table = xilinx_cpm_pcie_of_match,
        /* no .remove callback — disallow manual unbind via sysfs */
        .suppress_bind_attrs = true,
    },
    .probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);