// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *      https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

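/*
 * The driver models MSI handling as a hierarchy: the irq_chip below is the
 * top-level PCI/MSI chip, stacked on the per-vector "bottom" chip defined
 * further down. Masking is applied both at the PCI MSI capability and, via
 * the parent callbacks, at the controller's own mask registers.
 */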
static void dw_msi_ack_irq(struct irq_data *d)
{
    irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
    pci_msi_mask_irq(d);
    irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
    pci_msi_unmask_irq(d);
    irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
    .name = "PCI-MSI",
    .irq_ack = dw_msi_ack_irq,
    .irq_mask = dw_msi_mask_irq,
    .irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
    .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
           MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
    .chip   = &dw_pcie_msi_irq_chip,
};

/*
 * Handle an incoming MSI interrupt: scan each controller bank's STATUS
 * register and dispatch every pending vector into the MSI IRQ domain.
 */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
    int i, pos;
    unsigned long val;
    u32 status, num_ctrls;
    irqreturn_t ret = IRQ_NONE;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

    num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

    for (i = 0; i < num_ctrls; i++) {
        status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                       (i * MSI_REG_CTRL_BLOCK_SIZE));
        if (!status)
            continue;

        ret = IRQ_HANDLED;
        val = status;
        pos = 0;
        while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
                        pos)) != MAX_MSI_IRQS_PER_CTRL) {
            generic_handle_domain_irq(pp->irq_domain,
                          (i * MAX_MSI_IRQS_PER_CTRL) +
                          pos);
            pos++;
        }
    }

    return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);
    struct dw_pcie_rp *pp;

    chained_irq_enter(chip, desc);

    pp = irq_desc_get_handler_data(desc);
    dw_handle_msi_irq(pp);

    chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    u64 msi_target;

    msi_target = (u64)pp->msi_data;

    msg->address_lo = lower_32_bits(msi_target);
    msg->address_hi = upper_32_bits(msi_target);

    msg->data = d->hwirq;

    dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
        (int)d->hwirq, msg->address_hi, msg->address_lo);
}

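/*
 * All vectors funnel into the controller's chained parent interrupt(s), so
 * per-vector CPU affinity cannot be honoured; returning -EINVAL tells the
 * IRQ core that affinity changes are unsupported at this level.
 */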
static int dw_pci_msi_set_affinity(struct irq_data *d,
                   const struct cpumask *mask, bool force)
{
    return -EINVAL;
}

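/*
 * Each MSI controller bank serves MAX_MSI_IRQS_PER_CTRL vectors, with its
 * STATUS/MASK/ENABLE registers laid out at MSI_REG_CTRL_BLOCK_SIZE strides.
 * The callbacks below derive the bank index ("ctrl"), register offset
 * ("res") and bit position ("bit") from the hardware IRQ number.
 */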
static void dw_pci_bottom_mask(struct irq_data *d)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    unsigned int res, bit, ctrl;
    unsigned long flags;

    raw_spin_lock_irqsave(&pp->lock, flags);

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    pp->irq_mask[ctrl] |= BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

    raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    unsigned int res, bit, ctrl;
    unsigned long flags;

    raw_spin_lock_irqsave(&pp->lock, flags);

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    pp->irq_mask[ctrl] &= ~BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

    raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
    struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    unsigned int res, bit, ctrl;

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
    .name = "DWPCI-MSI",
    .irq_ack = dw_pci_bottom_ack,
    .irq_compose_msi_msg = dw_pci_setup_msi_msg,
    .irq_set_affinity = dw_pci_msi_set_affinity,
    .irq_mask = dw_pci_bottom_mask,
    .irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
                    unsigned int virq, unsigned int nr_irqs,
                    void *args)
{
    struct dw_pcie_rp *pp = domain->host_data;
    unsigned long flags;
    u32 i;
    int bit;

    raw_spin_lock_irqsave(&pp->lock, flags);

    bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                      order_base_2(nr_irqs));

    raw_spin_unlock_irqrestore(&pp->lock, flags);

    if (bit < 0)
        return -ENOSPC;

    for (i = 0; i < nr_irqs; i++)
        irq_domain_set_info(domain, virq + i, bit + i,
                    pp->msi_irq_chip,
                    pp, handle_edge_irq,
                    NULL, NULL);

    return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
                    unsigned int virq, unsigned int nr_irqs)
{
    struct irq_data *d = irq_domain_get_irq_data(domain, virq);
    struct dw_pcie_rp *pp = domain->host_data;
    unsigned long flags;

    raw_spin_lock_irqsave(&pp->lock, flags);

    bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
                  order_base_2(nr_irqs));

    raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
    .alloc  = dw_pcie_irq_domain_alloc,
    .free   = dw_pcie_irq_domain_free,
};

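/**
 * dw_pcie_allocate_domains - Allocate the MSI IRQ domains
 * @pp: Root port descriptor
 *
 * Create the linear IRQ domain backing the native MSI controller, then the
 * PCI/MSI domain stacked on top of it.
 *
 * Return: 0 on success, -ENOMEM if either domain cannot be created.
 */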
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

    pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                           &dw_pcie_msi_domain_ops, pp);
    if (!pp->irq_domain) {
        dev_err(pci->dev, "Failed to create IRQ domain\n");
        return -ENOMEM;
    }

    irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

    pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                           &dw_pcie_msi_domain_info,
                           pp->irq_domain);
    if (!pp->msi_domain) {
        dev_err(pci->dev, "Failed to create MSI domain\n");
        irq_domain_remove(pp->irq_domain);
        return -ENOMEM;
    }

    return 0;
}

static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
    u32 ctrl;

    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
        if (pp->msi_irq[ctrl] > 0)
            irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                             NULL, NULL);
    }

    irq_domain_remove(pp->msi_domain);
    irq_domain_remove(pp->irq_domain);

    if (pp->msi_data) {
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;

        dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
        if (pp->msi_page)
            __free_page(pp->msi_page);
    }
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    u64 msi_target = (u64)pp->msi_data;

    if (!pci_msi_enabled() || !pp->has_msi_ctrl)
        return;

    /* Program the msi_data */
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

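/*
 * Some platforms describe one interrupt per MSI controller bank rather than
 * a single "msi" interrupt. An illustrative (not normative) devicetree
 * fragment for such a platform might carry:
 *
 *     interrupt-names = "msi0", "msi1";
 *
 * with one entry per implemented bank, matching the "msiX" names parsed
 * below.
 */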
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct device *dev = pci->dev;
    struct platform_device *pdev = to_platform_device(dev);
    u32 ctrl, max_vectors;
    int irq;

    /* Parse any "msiX" IRQs described in the devicetree */
    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
        char msi_name[] = "msiX";

        msi_name[3] = '0' + ctrl;
        irq = platform_get_irq_byname_optional(pdev, msi_name);
        if (irq == -ENXIO)
            break;
        if (irq < 0)
            return dev_err_probe(dev, irq,
                         "Failed to parse MSI IRQ '%s'\n",
                         msi_name);

        pp->msi_irq[ctrl] = irq;
    }

    /* If there are no "msiX" IRQs, the caller should fall back to "msi" */
    if (ctrl == 0)
        return -ENXIO;

    max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
    if (pp->num_vectors > max_vectors) {
        dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
             max_vectors);
        pp->num_vectors = max_vectors;
    }
    if (!pp->num_vectors)
        pp->num_vectors = max_vectors;

    return 0;
}

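/*
 * Discover the MSI IRQ(s), allocate the IRQ domains, install the chained
 * handlers and DMA-map the page whose bus address is handed to endpoints
 * as the MSI target address.
 */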
static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct device *dev = pci->dev;
    struct platform_device *pdev = to_platform_device(dev);
    int ret;
    u32 ctrl, num_ctrls;

    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
        pp->irq_mask[ctrl] = ~0;

    if (!pp->msi_irq[0]) {
        ret = dw_pcie_parse_split_msi_irq(pp);
        if (ret < 0 && ret != -ENXIO)
            return ret;
    }

    if (!pp->num_vectors)
        pp->num_vectors = MSI_DEF_NUM_VECTORS;
    num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

    if (!pp->msi_irq[0]) {
        pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
        if (pp->msi_irq[0] < 0) {
            pp->msi_irq[0] = platform_get_irq(pdev, 0);
            if (pp->msi_irq[0] < 0)
                return pp->msi_irq[0];
        }
    }

    dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

    pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

    ret = dw_pcie_allocate_domains(pp);
    if (ret)
        return ret;

    for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
        if (pp->msi_irq[ctrl] > 0)
            irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                            dw_chained_msi_isr, pp);
    }

    ret = dma_set_mask(dev, DMA_BIT_MASK(32));
    if (ret)
        dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

    pp->msi_page = alloc_page(GFP_DMA32);
    pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
                    PAGE_SIZE, DMA_FROM_DEVICE);
    ret = dma_mapping_error(dev, pp->msi_data);
    if (ret) {
        dev_err(pci->dev, "Failed to map MSI data\n");
        __free_page(pp->msi_page);
        pp->msi_page = NULL;
        pp->msi_data = 0;
        dw_pcie_free_msi(pp);

        return ret;
    }

    return 0;
}

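/**
 * dw_pcie_host_init - Initialize a DesignWare PCIe host bridge
 * @pp: Root port descriptor
 *
 * Map the "config" and "dbi" resources, set up MSI handling, program the
 * root complex, bring up the link and enumerate the bus through
 * pci_host_probe().
 *
 * A platform glue driver would typically call this at the end of its probe
 * routine. A minimal sketch (all "foo" names are illustrative only):
 *
 *     static int foo_pcie_probe(struct platform_device *pdev)
 *     {
 *             struct dw_pcie *pci;
 *
 *             pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
 *             if (!pci)
 *                     return -ENOMEM;
 *
 *             pci->dev = &pdev->dev;
 *             pci->pp.ops = &foo_pcie_host_ops;
 *
 *             return dw_pcie_host_init(&pci->pp);
 *     }
 */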
int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct device *dev = pci->dev;
    struct device_node *np = dev->of_node;
    struct platform_device *pdev = to_platform_device(dev);
    struct resource_entry *win;
    struct pci_host_bridge *bridge;
    struct resource *res;
    int ret;

    raw_spin_lock_init(&pp->lock);

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
    if (res) {
        pp->cfg0_size = resource_size(res);
        pp->cfg0_base = res->start;

        pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pp->va_cfg0_base))
            return PTR_ERR(pp->va_cfg0_base);
    } else {
        dev_err(dev, "Missing *config* reg space\n");
        return -ENODEV;
    }

    if (!pci->dbi_base) {
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pci->dbi_base))
            return PTR_ERR(pci->dbi_base);
    }

    bridge = devm_pci_alloc_host_bridge(dev, 0);
    if (!bridge)
        return -ENOMEM;

    pp->bridge = bridge;

    /* Get the I/O range from DT */
    win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
    if (win) {
        pp->io_size = resource_size(win->res);
        pp->io_bus_addr = win->res->start - win->offset;
        pp->io_base = pci_pio_to_address(win->res->start);
    }

    if (pci->link_gen < 1)
        pci->link_gen = of_pci_get_max_link_speed(np);

    /* Set default bus ops */
    bridge->ops = &dw_pcie_ops;
    bridge->child_ops = &dw_child_pcie_ops;

    if (pp->ops->host_init) {
        ret = pp->ops->host_init(pp);
        if (ret)
            return ret;
    }

    if (pci_msi_enabled()) {
        pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
                     of_property_read_bool(np, "msi-parent") ||
                     of_property_read_bool(np, "msi-map"));

        /*
         * For the has_msi_ctrl case the default assignment is handled
         * in dw_pcie_msi_host_init().
         */
        if (!pp->has_msi_ctrl && !pp->num_vectors) {
            pp->num_vectors = MSI_DEF_NUM_VECTORS;
        } else if (pp->num_vectors > MAX_MSI_IRQS) {
            dev_err(dev, "Invalid number of vectors\n");
            ret = -EINVAL;
            goto err_deinit_host;
        }

        if (pp->ops->msi_host_init) {
            ret = pp->ops->msi_host_init(pp);
            if (ret < 0)
                goto err_deinit_host;
        } else if (pp->has_msi_ctrl) {
            ret = dw_pcie_msi_host_init(pp);
            if (ret < 0)
                goto err_deinit_host;
        }
    }

    dw_pcie_version_detect(pci);

    dw_pcie_iatu_detect(pci);

    ret = dw_pcie_setup_rc(pp);
    if (ret)
        goto err_free_msi;

    if (!dw_pcie_link_up(pci)) {
        ret = dw_pcie_start_link(pci);
        if (ret)
            goto err_free_msi;
    }

    /* Ignore errors, the link may come up later */
    dw_pcie_wait_for_link(pci);

    bridge->sysdata = pp;

    ret = pci_host_probe(bridge);
    if (ret)
        goto err_stop_link;

    return 0;

err_stop_link:
    dw_pcie_stop_link(pci);

err_free_msi:
    if (pp->has_msi_ctrl)
        dw_pcie_free_msi(pp);

err_deinit_host:
    if (pp->ops->host_deinit)
        pp->ops->host_deinit(pp);

    return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

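/**
 * dw_pcie_host_deinit - Tear down a bridge set up by dw_pcie_host_init()
 * @pp: Root port descriptor
 *
 * Remove the root bus, stop the link, release the MSI resources and invoke
 * the platform's host_deinit callback, mirroring the init path in reverse.
 */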
void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

    pci_stop_root_bus(pp->bridge->bus);
    pci_remove_root_bus(pp->bridge->bus);

    dw_pcie_stop_link(pci);

    if (pp->has_msi_ctrl)
        dw_pcie_free_msi(pp);

    if (pp->ops->host_deinit)
        pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
                        unsigned int devfn, int where)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    int type, ret;
    u32 busdev;

    /*
     * Checking whether the link is up here is a last line of defense
     * against platforms that forward errors on the system bus as
     * SError upon PCI configuration transactions issued when the link
     * is down. This check is racy by definition and does not stop
     * the system from triggering an SError if the link goes down
     * after this check is performed.
     */
    if (!dw_pcie_link_up(pci))
        return NULL;

    busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
         PCIE_ATU_FUNC(PCI_FUNC(devfn));

    if (pci_is_root_bus(bus->parent))
        type = PCIE_ATU_TYPE_CFG0;
    else
        type = PCIE_ATU_TYPE_CFG1;

    ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
                    pp->cfg0_size);
    if (ret)
        return NULL;

    return pp->va_cfg0_base + where;
}

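/*
 * When the last outbound iATU window is shared between CFG and IO accesses
 * (pp->cfg0_io_shared), dw_pcie_other_conf_map_bus() above retargets it at
 * config space, so the accessors below must restore it to the IO range once
 * the config transaction has completed.
 */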
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
                 int where, int size, u32 *val)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    int ret;

    ret = pci_generic_config_read(bus, devfn, where, size, val);
    if (ret != PCIBIOS_SUCCESSFUL)
        return ret;

    if (pp->cfg0_io_shared) {
        ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                        pp->io_base, pp->io_bus_addr,
                        pp->io_size);
        if (ret)
            return PCIBIOS_SET_FAILED;
    }

    return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
                 int where, int size, u32 val)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    int ret;

    ret = pci_generic_config_write(bus, devfn, where, size, val);
    if (ret != PCIBIOS_SUCCESSFUL)
        return ret;

    if (pp->cfg0_io_shared) {
        ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                        pp->io_base, pp->io_bus_addr,
                        pp->io_size);
        if (ret)
            return PCIBIOS_SET_FAILED;
    }

    return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
    .map_bus = dw_pcie_other_conf_map_bus,
    .read = dw_pcie_rd_other_conf,
    .write = dw_pcie_wr_other_conf,
};

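/**
 * dw_pcie_own_conf_map_bus - Map root bus config accesses onto DBI space
 * @bus: Root bus
 * @devfn: Device/function being accessed
 * @where: Config space register offset
 *
 * The root port's own config space is reached through the DBI registers.
 * Only device 0 exists on the root bus, so accesses to any other slot
 * return NULL.
 */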
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
    struct dw_pcie_rp *pp = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

    if (PCI_SLOT(devfn) > 0)
        return NULL;

    return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
    .map_bus = dw_pcie_own_conf_map_bus,
    .read = pci_generic_config_read,
    .write = pci_generic_config_write,
};

static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    struct resource_entry *entry;
    int i, ret;

    /* Note the very first outbound ATU is used for CFG IOs */
    if (!pci->num_ob_windows) {
        dev_err(pci->dev, "No outbound iATU found\n");
        return -EINVAL;
    }

    /*
     * Ensure all outbound windows are disabled before proceeding with
     * the MEM/IO range setups.
     */
    for (i = 0; i < pci->num_ob_windows; i++)
        dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

    i = 0;
    resource_list_for_each_entry(entry, &pp->bridge->windows) {
        if (resource_type(entry->res) != IORESOURCE_MEM)
            continue;

        if (pci->num_ob_windows <= ++i)
            break;

        ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
                        entry->res->start,
                        entry->res->start - entry->offset,
                        resource_size(entry->res));
        if (ret) {
            dev_err(pci->dev, "Failed to set MEM range %pr\n",
                entry->res);
            return ret;
        }
    }

    if (pp->io_size) {
        if (pci->num_ob_windows > ++i) {
            ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
                            pp->io_base,
                            pp->io_bus_addr,
                            pp->io_size);
            if (ret) {
                /* "entry" is no longer valid past the loop above */
                dev_err(pci->dev, "Failed to set IO range %pa\n",
                    &pp->io_base);
                return ret;
            }
        } else {
            pp->cfg0_io_shared = true;
        }
    }

    if (pci->num_ob_windows <= i)
        dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
             pci->num_ob_windows);

    return 0;
}

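/**
 * dw_pcie_setup_rc - Program the root complex registers
 * @pp: Root port descriptor
 *
 * Configure the MSI controller banks, RC BARs, interrupt pin, bus numbers,
 * command register, outbound iATU windows and bridge class code. The DBI
 * read-only registers are unlocked for writing only for the duration of
 * this function.
 */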
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
    struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
    u32 val, ctrl, num_ctrls;
    int ret;

    /*
     * Enable DBI read-only registers for writing/updating configuration.
     * Write permission gets disabled towards the end of this function.
     */
    dw_pcie_dbi_ro_wr_en(pci);

    dw_pcie_setup(pci);

    if (pp->has_msi_ctrl) {
        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /* Initialize IRQ Status array */
        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                        (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                        pp->irq_mask[ctrl]);
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                        (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                        ~0);
        }
    }

    dw_pcie_msi_init(pp);

    /* Setup RC BARs */
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

    /* Setup interrupt pins */
    val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
    val &= 0xffff00ff;
    val |= 0x00000100;
    dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

    /* Setup bus numbers */
    val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
    val &= 0xff000000;
    val |= 0x00ff0100;
    dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

    /* Setup command register */
    val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
    val &= 0xffff0000;
    val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
        PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
    dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

    /*
     * If the platform provides its own child bus config accesses, it means
     * the platform uses its own address translation component rather than
     * ATU, so we should not program the ATU here.
     */
    if (pp->bridge->child_ops == &dw_child_pcie_ops) {
        ret = dw_pcie_iatu_setup(pp);
        if (ret)
            return ret;
    }

    dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

    /* Program correct class for RC */
    dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
    val |= PORT_LOGIC_SPEED_CHANGE;
    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

    dw_pcie_dbi_ro_wr_dis(pci);

    return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);