// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>

#include "../pci.h"

/* Bridge core config registers */
#define BRCFG_PCIE_RX0          0x00000000
#define BRCFG_PCIE_RX1          0x00000004
#define BRCFG_INTERRUPT         0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER    0x00000020

/* Egress - Bridge translation registers */
#define E_BREG_CAPABILITIES     0x00000200
#define E_BREG_CONTROL          0x00000208
#define E_BREG_BASE_LO          0x00000210
#define E_BREG_BASE_HI          0x00000214
#define E_ECAM_CAPABILITIES     0x00000220
#define E_ECAM_CONTROL          0x00000228
#define E_ECAM_BASE_LO          0x00000230
#define E_ECAM_BASE_HI          0x00000234

/* Ingress - address translations */
#define I_MSII_CAPABILITIES     0x00000300
#define I_MSII_CONTROL          0x00000308
#define I_MSII_BASE_LO          0x00000310
#define I_MSII_BASE_HI          0x00000314

#define I_ISUB_CONTROL          0x000003E8
#define SET_ISUB_CONTROL        BIT(0)
/* Received message FIFO - interrupt status registers */
#define MSGF_MISC_STATUS        0x00000400
#define MSGF_MISC_MASK          0x00000404
#define MSGF_LEG_STATUS         0x00000420
#define MSGF_LEG_MASK           0x00000424
#define MSGF_MSI_STATUS_LO      0x00000440
#define MSGF_MSI_STATUS_HI      0x00000444
#define MSGF_MSI_MASK_LO        0x00000448
#define MSGF_MSI_MASK_HI        0x0000044C

/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD       BIT(1)
#define CFG_ENABLE_INT_MSG_FWD      BIT(2)
#define CFG_ENABLE_ERR_MSG_FWD      BIT(3)
#define CFG_ENABLE_MSG_FILTER_MASK  (CFG_ENABLE_PM_MSG_FWD | \
                    CFG_ENABLE_INT_MSG_FWD | \
                    CFG_ENABLE_ERR_MSG_FWD)

/* Misc interrupt status mask bits */
#define MSGF_MISC_SR_RXMSG_AVAIL    BIT(0)
#define MSGF_MISC_SR_RXMSG_OVER     BIT(1)
#define MSGF_MISC_SR_SLAVE_ERR      BIT(4)
#define MSGF_MISC_SR_MASTER_ERR     BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR     BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR     BIT(7)
#define MSGF_MISC_SR_FATAL_AER      BIT(16)
#define MSGF_MISC_SR_NON_FATAL_AER  BIT(17)
#define MSGF_MISC_SR_CORR_AER       BIT(18)
#define MSGF_MISC_SR_UR_DETECT      BIT(20)
#define MSGF_MISC_SR_NON_FATAL_DEV  BIT(22)
#define MSGF_MISC_SR_FATAL_DEV      BIT(23)
#define MSGF_MISC_SR_LINK_DOWN      BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH   BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH    BIT(26)

#define MSGF_MISC_SR_MASKALL        (MSGF_MISC_SR_RXMSG_AVAIL | \
                    MSGF_MISC_SR_RXMSG_OVER | \
                    MSGF_MISC_SR_SLAVE_ERR | \
                    MSGF_MISC_SR_MASTER_ERR | \
                    MSGF_MISC_SR_I_ADDR_ERR | \
                    MSGF_MISC_SR_E_ADDR_ERR | \
                    MSGF_MISC_SR_FATAL_AER | \
                    MSGF_MISC_SR_NON_FATAL_AER | \
                    MSGF_MISC_SR_CORR_AER | \
                    MSGF_MISC_SR_UR_DETECT | \
                    MSGF_MISC_SR_NON_FATAL_DEV | \
                    MSGF_MISC_SR_FATAL_DEV | \
                    MSGF_MISC_SR_LINK_DOWN | \
                    MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
                    MSGF_MSIC_SR_LINK_BWIDTH)

/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA        BIT(0)
#define MSGF_LEG_SR_INTB        BIT(1)
#define MSGF_LEG_SR_INTC        BIT(2)
#define MSGF_LEG_SR_INTD        BIT(3)
#define MSGF_LEG_SR_MASKALL     (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
                    MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)

/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK     GENMASK(31, 0)
#define MSGF_MSI_SR_HI_MASK     GENMASK(31, 0)

#define MSII_PRESENT            BIT(0)
#define MSII_ENABLE         BIT(0)
#define MSII_STATUS_ENABLE      BIT(15)

/* Bridge config interrupt mask */
#define BRCFG_INTERRUPT_MASK        BIT(0)
#define BREG_PRESENT            BIT(0)
#define BREG_ENABLE         BIT(0)
#define BREG_ENABLE_FORCE       BIT(1)

/* E_ECAM status mask bits */
#define E_ECAM_PRESENT          BIT(0)
#define E_ECAM_CR_ENABLE        BIT(0)
#define E_ECAM_SIZE_LOC         GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT       16
#define NWL_ECAM_VALUE_DEFAULT      12

#define CFG_DMA_REG_BAR         GENMASK(2, 0)
#define CFG_PCIE_CACHE          GENMASK(7, 0)

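/*
 * Two 32-bit MSI status registers (MSGF_MSI_STATUS_LO/HI) each report 32
 * vectors, giving the bridge 64 distinct MSIs in total.
 */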
#define INT_PCI_MSI_NR          (2 * 32)

/* Register for reading the PS_LINKUP status */
#define PS_LINKUP_OFFSET        0x00000238
#define PCIE_PHY_LINKUP_BIT     BIT(0)
#define PHY_RDY_LINKUP_BIT      BIT(1)

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES          10
#define LINK_WAIT_USLEEP_MIN           90000
#define LINK_WAIT_USLEEP_MAX           100000

struct nwl_msi {            /* MSI information */
    struct irq_domain *msi_domain;
    DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
    struct irq_domain *dev_domain;
    struct mutex lock;      /* protect bitmap variable */
    int irq_msi0;
    int irq_msi1;
};

struct nwl_pcie {
    struct device *dev;
    void __iomem *breg_base;
    void __iomem *pcireg_base;
    void __iomem *ecam_base;
    phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
    phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
    phys_addr_t phys_ecam_base; /* Physical Configuration Base */
    u32 breg_size;
    u32 pcie_reg_size;
    u32 ecam_size;
    int irq_intx;
    int irq_misc;
    u32 ecam_value;
    u8 last_busno;
    struct nwl_msi msi;
    struct irq_domain *legacy_irq_domain;
    struct clk *clk;
    raw_spinlock_t leg_mask_lock;
};

static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
    return readl(pcie->breg_base + off);
}

static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
    writel(val, pcie->breg_base + off);
}

static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
{
    if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
        return true;
    return false;
}

static bool nwl_phy_link_up(struct nwl_pcie *pcie)
{
    if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
        return true;
    return false;
}

static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
    struct device *dev = pcie->dev;
    int retries;

    /* Poll for link-up; worst case this waits ~1 s (10 retries * 100 ms) */
    for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
        if (nwl_phy_link_up(pcie))
            return 0;
        usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
    }

    dev_err(dev, "PHY link never came up\n");
    return -ETIMEDOUT;
}

static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
    struct nwl_pcie *pcie = bus->sysdata;

    /* Check link before accessing downstream ports */
    if (!pci_is_root_bus(bus)) {
        if (!nwl_pcie_link_up(pcie))
            return false;
    } else if (devfn > 0) {
        /* Only one device down on each root port */
        return false;
    }

    return true;
}

/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the configuration space needed to be
 *     accessed.
 */
static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
                      int where)
{
    struct nwl_pcie *pcie = bus->sysdata;

    if (!nwl_pcie_valid_device(bus, devfn))
        return NULL;

    return pcie->ecam_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
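
/*
 * PCIE_ECAM_OFFSET() (<linux/pci-ecam.h>) implements the standard ECAM
 * layout: bus number in address bits [27:20], devfn in [19:12] and the
 * register offset in [11:0], so each bus consumes 1 MiB of the mapped
 * "cfg" window.
 */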

/* PCIe operations */
static struct pci_ops nwl_pcie_ops = {
    .map_bus = nwl_pcie_map_bus,
    .read  = pci_generic_config_read,
    .write = pci_generic_config_write,
};

static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
    struct nwl_pcie *pcie = data;
    struct device *dev = pcie->dev;
    u32 misc_stat;

    /* Checking for misc interrupts */
    misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
                     MSGF_MISC_SR_MASKALL;
    if (!misc_stat)
        return IRQ_NONE;

    if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
        dev_err(dev, "Received Message FIFO Overflow\n");

    if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
        dev_err(dev, "Slave error\n");

    if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
        dev_err(dev, "Master error\n");

    if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
        dev_err(dev, "In Misc Ingress address translation error\n");

    if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
        dev_err(dev, "In Misc Egress address translation error\n");

    if (misc_stat & MSGF_MISC_SR_FATAL_AER)
        dev_err(dev, "Fatal Error in AER Capability\n");

    if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
        dev_err(dev, "Non-Fatal Error in AER Capability\n");

    if (misc_stat & MSGF_MISC_SR_CORR_AER)
        dev_err(dev, "Correctable Error in AER Capability\n");

    if (misc_stat & MSGF_MISC_SR_UR_DETECT)
        dev_err(dev, "Unsupported request Detected\n");

    if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
        dev_err(dev, "Non-Fatal Error Detected\n");

    if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
        dev_err(dev, "Fatal Error Detected\n");

    if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
        dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");

    if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
        dev_info(dev, "Link Bandwidth Management Status bit set\n");

    /* Clear misc interrupt status */
    nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

    return IRQ_HANDLED;
}

static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);
    struct nwl_pcie *pcie;
    unsigned long status;
    u32 bit;

    chained_irq_enter(chip, desc);
    pcie = irq_desc_get_handler_data(desc);

    while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
                MSGF_LEG_SR_MASKALL) != 0) {
        for_each_set_bit(bit, &status, PCI_NUM_INTX)
            generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
    }

    chained_irq_exit(chip, desc);
}

static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
    struct nwl_msi *msi = &pcie->msi;
    unsigned long status;
    u32 bit;

    while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
        for_each_set_bit(bit, &status, 32) {
            nwl_bridge_writel(pcie, 1 << bit, status_reg);
            generic_handle_domain_irq(msi->dev_domain, bit);
        }
    }
}
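
/*
 * Note the ack-then-dispatch order above: each status bit is cleared by
 * writing it back (the status register behaves as write-one-to-clear)
 * before the MSI is handled, so a vector that fires again while being
 * serviced re-latches and is caught by the next pass of the loop.
 */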

static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);
    struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

    chained_irq_enter(chip, desc);
    nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
    chained_irq_exit(chip, desc);
}

static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);
    struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

    chained_irq_enter(chip, desc);
    nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
    chained_irq_exit(chip, desc);
}

static void nwl_mask_leg_irq(struct irq_data *data)
{
    struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
    unsigned long flags;
    u32 mask;
    u32 val;

    mask = 1 << data->hwirq;
    raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
    val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
    nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
    raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static void nwl_unmask_leg_irq(struct irq_data *data)
{
    struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
    unsigned long flags;
    u32 mask;
    u32 val;

    mask = 1 << data->hwirq;
    raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
    val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
    nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
    raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static struct irq_chip nwl_leg_irq_chip = {
    .name = "nwl_pcie:legacy",
    .irq_enable = nwl_unmask_leg_irq,
    .irq_disable = nwl_mask_leg_irq,
    .irq_mask = nwl_mask_leg_irq,
    .irq_unmask = nwl_unmask_leg_irq,
};

static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
              irq_hw_number_t hwirq)
{
    irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
    irq_set_chip_data(irq, domain->host_data);
    irq_set_status_flags(irq, IRQ_LEVEL);

    return 0;
}

static const struct irq_domain_ops legacy_domain_ops = {
    .map = nwl_legacy_map,
    .xlate = pci_irqd_intx_xlate,
};
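
/*
 * pci_irqd_intx_xlate() maps the INTA..INTD specifiers (1-4) to hwirqs
 * 0-3, which line up directly with the MSGF_LEG_SR_INTA..INTD bit
 * positions used by the mask/unmask helpers above and by the chained
 * handler in nwl_pcie_leg_handler().
 */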

#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
    .name = "nwl_pcie:msi",
    .irq_enable = pci_msi_unmask_irq,
    .irq_disable = pci_msi_mask_irq,
    .irq_mask = pci_msi_mask_irq,
    .irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info nwl_msi_domain_info = {
    .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
          MSI_FLAG_MULTI_PCI_MSI),
    .chip = &nwl_msi_irq_chip,
};
#endif

static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
    struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
    phys_addr_t msi_addr = pcie->phys_pcie_reg_base;

    msg->address_lo = lower_32_bits(msi_addr);
    msg->address_hi = upper_32_bits(msi_addr);
    msg->data = data->hwirq;
}
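
/*
 * The doorbell address handed to endpoints is the bridge's own physical
 * register base (the same address programmed into I_MSII_BASE_LO/HI in
 * nwl_pcie_enable_msi()), and the payload is just the hwirq number, which
 * is the bit that shows up in MSGF_MSI_STATUS_LO/HI when the MSI fires.
 */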

static int nwl_msi_set_affinity(struct irq_data *irq_data,
                const struct cpumask *mask, bool force)
{
    return -EINVAL;
}

static struct irq_chip nwl_irq_chip = {
    .name = "Xilinx MSI",
    .irq_compose_msi_msg = nwl_compose_msi_msg,
    .irq_set_affinity = nwl_msi_set_affinity,
};

static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                unsigned int nr_irqs, void *args)
{
    struct nwl_pcie *pcie = domain->host_data;
    struct nwl_msi *msi = &pcie->msi;
    int bit;
    int i;

    mutex_lock(&msi->lock);
    bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
                      get_count_order(nr_irqs));
    if (bit < 0) {
        mutex_unlock(&msi->lock);
        return -ENOSPC;
    }

    for (i = 0; i < nr_irqs; i++) {
        irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
                domain->host_data, handle_simple_irq,
                NULL, NULL);
    }
    mutex_unlock(&msi->lock);
    return 0;
}
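
/*
 * bitmap_find_free_region() hands back a naturally aligned power-of-two
 * block of hwirqs, which is what multi-MSI (MSI_FLAG_MULTI_PCI_MSI above)
 * needs: an endpoint encodes the vector index in the low bits of the MSI
 * data, so the base must be aligned to the allocation size.
 */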

static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                    unsigned int nr_irqs)
{
    struct irq_data *data = irq_domain_get_irq_data(domain, virq);
    struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
    struct nwl_msi *msi = &pcie->msi;

    mutex_lock(&msi->lock);
    bitmap_release_region(msi->bitmap, data->hwirq,
                  get_count_order(nr_irqs));
    mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
    .alloc  = nwl_irq_domain_alloc,
    .free   = nwl_irq_domain_free,
};
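
/*
 * MSI handling is the usual two-level arrangement: dev_msi_domain_ops
 * hands out hwirqs from the bitmap, and the PCI MSI domain created on
 * top of it in nwl_pcie_init_msi_irq_domain() supplies the standard MSI
 * chip ops and descriptor plumbing.
 */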

static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
    struct device *dev = pcie->dev;
    struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
    struct nwl_msi *msi = &pcie->msi;

    msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
                        &dev_msi_domain_ops, pcie);
    if (!msi->dev_domain) {
        dev_err(dev, "failed to create dev IRQ domain\n");
        return -ENOMEM;
    }
    msi->msi_domain = pci_msi_create_irq_domain(fwnode,
                            &nwl_msi_domain_info,
                            msi->dev_domain);
    if (!msi->msi_domain) {
        dev_err(dev, "failed to create msi IRQ domain\n");
        irq_domain_remove(msi->dev_domain);
        return -ENOMEM;
    }
#endif
    return 0;
}

static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
    struct device *dev = pcie->dev;
    struct device_node *node = dev->of_node;
    struct device_node *legacy_intc_node;

    legacy_intc_node = of_get_next_child(node, NULL);
    if (!legacy_intc_node) {
        dev_err(dev, "No legacy intc node found\n");
        return -EINVAL;
    }

    pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
                            PCI_NUM_INTX,
                            &legacy_domain_ops,
                            pcie);
    of_node_put(legacy_intc_node);
    if (!pcie->legacy_irq_domain) {
        dev_err(dev, "failed to create IRQ domain\n");
        return -ENOMEM;
    }

    raw_spin_lock_init(&pcie->leg_mask_lock);
    /* Propagate MSI domain setup failures instead of discarding them */
    return nwl_pcie_init_msi_irq_domain(pcie);
}

static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
{
    struct device *dev = pcie->dev;
    struct platform_device *pdev = to_platform_device(dev);
    struct nwl_msi *msi = &pcie->msi;
    unsigned long base;
    int ret;

    mutex_init(&msi->lock);

    /* Get msi_1 IRQ number */
    msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
    if (msi->irq_msi1 < 0)
        return msi->irq_msi1;

    irq_set_chained_handler_and_data(msi->irq_msi1,
                     nwl_pcie_msi_handler_high, pcie);

    /* Get msi_0 IRQ number */
    msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
    if (msi->irq_msi0 < 0)
        return msi->irq_msi0;

    irq_set_chained_handler_and_data(msi->irq_msi0,
                     nwl_pcie_msi_handler_low, pcie);

    /* Check for msii_present bit */
    ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
    if (!ret) {
        dev_err(dev, "MSI not present\n");
        return -EIO;
    }

    /* Enable MSII */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
              MSII_ENABLE, I_MSII_CONTROL);

    /* Enable MSII status */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
              MSII_STATUS_ENABLE, I_MSII_CONTROL);

    /* Program the MSI target address (the bridge's own register base) */
    base = pcie->phys_pcie_reg_base;
    nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
    nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

    /*
     * For high range MSI interrupts: disable, clear any pending,
     * and enable
     */
    nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);

    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
              MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);

    nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

    /*
     * For low range MSI interrupts: disable, clear any pending,
     * and enable
     */
    nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);

    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
              MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);

    nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

    return 0;
}
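
/*
 * Polarity note: despite their names, the MSGF_*_MASK registers act as
 * interrupt-enable registers in this driver - writing 0 (as in the
 * "disable" steps above) gates delivery and writing 1 enables it, which
 * also matches nwl_mask_leg_irq()/nwl_unmask_leg_irq().
 */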

static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
    struct device *dev = pcie->dev;
    struct platform_device *pdev = to_platform_device(dev);
    u32 breg_val, ecam_val, first_busno = 0;
    int err;

    breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
    if (!breg_val) {
        dev_err(dev, "BREG is not present\n");
        return -ENODEV;
    }

    /* Write bridge_off to breg base */
    nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
              E_BREG_BASE_LO);
    nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
              E_BREG_BASE_HI);

    /* Enable BREG */
    nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
              E_BREG_CONTROL);

    /* Disable DMA channel registers */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
              CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);

    /* Enable Ingress subtractive decode translation */
    nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);

    /* Enable msg filtering details */
    nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
              BRCFG_PCIE_RX_MSG_FILTER);

    /* This routes the PCIe DMA traffic to go through CCI path */
    if (of_dma_is_coherent(dev->of_node))
        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX1) |
                  CFG_PCIE_CACHE, BRCFG_PCIE_RX1);

    err = nwl_wait_for_link(pcie);
    if (err)
        return err;

    ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
    if (!ecam_val) {
        dev_err(dev, "ECAM is not present\n");
        return -ENODEV;
    }

    /* Enable ECAM */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
              E_ECAM_CR_ENABLE, E_ECAM_CONTROL);

    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
              (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
              E_ECAM_CONTROL);

    nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
              E_ECAM_BASE_LO);
    nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
              E_ECAM_BASE_HI);

    /* Get bus range */
    ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
    pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
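    /*
     * In the root port's config header the primary, secondary and
     * subordinate bus numbers occupy consecutive bytes starting at
     * PCI_PRIMARY_BUS, hence the shifts of 0, 8 and 16 below
     * (E_ECAM_SIZE_SHIFT doubling as the 16-bit shift).
     */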
    /* Write primary, secondary and subordinate bus numbers */
    ecam_val = first_busno;
    ecam_val |= (first_busno + 1) << 8;
    ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
    writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));

    if (nwl_pcie_link_up(pcie))
        dev_info(dev, "Link is UP\n");
    else
        dev_info(dev, "Link is DOWN\n");

    /* Get misc IRQ number */
    pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
    if (pcie->irq_misc < 0)
        return pcie->irq_misc;

    err = devm_request_irq(dev, pcie->irq_misc,
                   nwl_pcie_misc_handler, IRQF_SHARED,
                   "nwl_pcie:misc", pcie);
    if (err) {
        dev_err(dev, "fail to register misc IRQ#%d\n",
            pcie->irq_misc);
        return err;
    }

    /* Disable all misc interrupts */
    nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

    /* Clear pending misc interrupts */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
              MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);

    /* Enable all misc interrupts */
    nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

    /* Disable all legacy interrupts */
    nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

    /* Clear pending legacy interrupts */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
              MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);

    /* Enable all legacy interrupts */
    nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

    /* Enable the bridge config interrupt */
    nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
              BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);

    return 0;
}

static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
                 struct platform_device *pdev)
{
    struct device *dev = pcie->dev;
    struct resource *res;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
    pcie->breg_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(pcie->breg_base))
        return PTR_ERR(pcie->breg_base);
    pcie->phys_breg_base = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
    pcie->pcireg_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(pcie->pcireg_base))
        return PTR_ERR(pcie->pcireg_base);
    pcie->phys_pcie_reg_base = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
    pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(pcie->ecam_base))
        return PTR_ERR(pcie->ecam_base);
    pcie->phys_ecam_base = res->start;

    /* Get intx IRQ number */
    pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
    if (pcie->irq_intx < 0)
        return pcie->irq_intx;

    irq_set_chained_handler_and_data(pcie->irq_intx,
                     nwl_pcie_leg_handler, pcie);

    return 0;
}

static const struct of_device_id nwl_pcie_of_match[] = {
    { .compatible = "xlnx,nwl-pcie-2.11", },
    {}
};

static int nwl_pcie_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct nwl_pcie *pcie;
    struct pci_host_bridge *bridge;
    int err;

    bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
    if (!bridge)
        return -ENODEV;

    pcie = pci_host_bridge_priv(bridge);

    pcie->dev = dev;
    pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

    err = nwl_pcie_parse_dt(pcie, pdev);
    if (err) {
        dev_err(dev, "Parsing DT failed\n");
        return err;
    }

    pcie->clk = devm_clk_get(dev, NULL);
    if (IS_ERR(pcie->clk))
        return PTR_ERR(pcie->clk);

    err = clk_prepare_enable(pcie->clk);
    if (err) {
        dev_err(dev, "can't enable PCIe ref clock\n");
        return err;
    }

    err = nwl_pcie_bridge_init(pcie);
    if (err) {
        dev_err(dev, "HW Initialization failed\n");
        return err;
    }

    err = nwl_pcie_init_irq_domain(pcie);
    if (err) {
        dev_err(dev, "Failed creating IRQ Domain\n");
        return err;
    }

    bridge->sysdata = pcie;
    bridge->ops = &nwl_pcie_ops;

    if (IS_ENABLED(CONFIG_PCI_MSI)) {
        err = nwl_pcie_enable_msi(pcie);
        if (err < 0) {
            dev_err(dev, "failed to enable MSI support: %d\n", err);
            return err;
        }
    }

    return pci_host_probe(bridge);
}

static struct platform_driver nwl_pcie_driver = {
    .driver = {
        .name = "nwl-pcie",
        .suppress_bind_attrs = true,
        .of_match_table = nwl_pcie_of_match,
    },
    .probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);
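
/*
 * Binding summary, inferred from the lookups above rather than from the
 * dt-binding document: a matching node uses compatible
 * "xlnx,nwl-pcie-2.11", three named reg regions ("breg", "pcireg" and
 * "cfg" for the ECAM window), named interrupts "misc", "intx", "msi0"
 * and "msi1" (order does not matter for by-name lookup), a reference
 * clock, and a child interrupt-controller node that
 * nwl_pcie_init_irq_domain() picks up as the INTx domain. As a rough
 * sketch (addresses and the full property set omitted):
 *
 *    pcie: pcie@fd0e0000 {
 *        compatible = "xlnx,nwl-pcie-2.11";
 *        reg-names = "breg", "pcireg", "cfg";
 *        interrupt-names = "misc", "intx", "msi1", "msi0";
 *        ...
 *        pcie_intc: legacy-interrupt-controller {
 *            interrupt-controller;
 *            #interrupt-cells = <1>;
 *        };
 *    };
 *
 * See the ZynqMP dts files for a complete example.
 */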