// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *      https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

void dw_pcie_version_detect(struct dw_pcie *pci)
{
    u32 ver;

    /* The content of the CSR is zero on DWC PCIe older than v4.70a */
    ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
    if (!ver)
        return;

    if (pci->version && pci->version != ver)
        dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
             pci->version, ver);
    else
        pci->version = ver;

    ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);

    if (pci->type && pci->type != ver)
        dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
             pci->type, ver);
    else
        pci->type = ver;
}

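/*
 * Example (illustrative sketch, not taken from any specific glue driver):
 * a platform driver that knows its DWC IP revision can seed pci->version
 * before probing, and dw_pcie_version_detect() will then either confirm it
 * against the version CSR or warn on a mismatch. The constant chosen below
 * is an arbitrary placeholder.
 */
static void example_seed_dwc_version(struct dw_pcie *pci)
{
    /* Known from the SoC integration data; placeholder value */
    pci->version = DW_PCIE_VER_490A;

    /* Cross-checked (or filled in) from PCIE_VERSION_NUMBER */
    dw_pcie_version_detect(pci);
}
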
/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
                  u8 cap)
{
    u8 cap_id, next_cap_ptr;
    u16 reg;

    if (!cap_ptr)
        return 0;

    reg = dw_pcie_readw_dbi(pci, cap_ptr);
    cap_id = (reg & 0x00ff);

    if (cap_id > PCI_CAP_ID_MAX)
        return 0;

    if (cap_id == cap)
        return cap_ptr;

    next_cap_ptr = (reg & 0xff00) >> 8;
    return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
    u8 next_cap_ptr;
    u16 reg;

    reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
    next_cap_ptr = (reg & 0x00ff);

    return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
                        u8 cap)
{
    u32 header;
    int ttl;
    int pos = PCI_CFG_SPACE_SIZE;

    /* minimum 8 bytes per capability */
    ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

    if (start)
        pos = start;

    header = dw_pcie_readl_dbi(pci, pos);
    /*
     * If we have no capabilities, this is indicated by cap ID,
     * cap version and next pointer all being 0.
     */
    if (header == 0)
        return 0;

    while (ttl-- > 0) {
        if (PCI_EXT_CAP_ID(header) == cap && pos != start)
            return pos;

        pos = PCI_EXT_CAP_NEXT(header);
        if (pos < PCI_CFG_SPACE_SIZE)
            break;

        header = dw_pcie_readl_dbi(pci, pos);
    }

    return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
    return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);

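/*
 * Example (illustrative sketch): because these helpers walk the capability
 * lists through the DBI, they work before any struct pci_dev exists for the
 * controller. The check below reads the controller's MSI capability flags to
 * see whether 64-bit message addresses are supported; the helper name is a
 * placeholder and is not used anywhere in this driver.
 */
static bool example_msi_is_64bit(struct dw_pcie *pci)
{
    u16 flags;
    u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);

    if (!offset)
        return false;

    flags = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
    return flags & PCI_MSI_FLAGS_64BIT;
}
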
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
    if (!IS_ALIGNED((uintptr_t)addr, size)) {
        *val = 0;
        return PCIBIOS_BAD_REGISTER_NUMBER;
    }

    if (size == 4) {
        *val = readl(addr);
    } else if (size == 2) {
        *val = readw(addr);
    } else if (size == 1) {
        *val = readb(addr);
    } else {
        *val = 0;
        return PCIBIOS_BAD_REGISTER_NUMBER;
    }

    return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
    if (!IS_ALIGNED((uintptr_t)addr, size))
        return PCIBIOS_BAD_REGISTER_NUMBER;

    if (size == 4)
        writel(val, addr);
    else if (size == 2)
        writew(val, addr);
    else if (size == 1)
        writeb(val, addr);
    else
        return PCIBIOS_BAD_REGISTER_NUMBER;

    return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
    int ret;
    u32 val;

    if (pci->ops && pci->ops->read_dbi)
        return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

    ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
    if (ret)
        dev_err(pci->dev, "Read DBI address failed\n");

    return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
    int ret;

    if (pci->ops && pci->ops->write_dbi) {
        pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
        return;
    }

    ret = dw_pcie_write(pci->dbi_base + reg, size, val);
    if (ret)
        dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
    int ret;

    if (pci->ops && pci->ops->write_dbi2) {
        pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
        return;
    }

    ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
    if (ret)
        dev_err(pci->dev, "Write DBI2 address failed\n");
}

static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
                           u32 index)
{
    if (pci->iatu_unroll_enabled)
        return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);

    dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
    return pci->atu_base;
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
    void __iomem *base;
    int ret;
    u32 val;

    base = dw_pcie_select_atu(pci, dir, index);

    if (pci->ops && pci->ops->read_dbi)
        return pci->ops->read_dbi(pci, base, reg, 4);

    ret = dw_pcie_read(base + reg, 4, &val);
    if (ret)
        dev_err(pci->dev, "Read ATU address failed\n");

    return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
                   u32 reg, u32 val)
{
    void __iomem *base;
    int ret;

    base = dw_pcie_select_atu(pci, dir, index);

    if (pci->ops && pci->ops->write_dbi) {
        pci->ops->write_dbi(pci, base, reg, 4, val);
        return;
    }

    ret = dw_pcie_write(base + reg, 4, val);
    if (ret)
        dev_err(pci->dev, "Write ATU address failed\n");
}

static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
    return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}

static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
                     u32 val)
{
    dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}

static inline u32 dw_pcie_enable_ecrc(u32 val)
{
    /*
     * DesignWare core version 4.90A has a design issue where the 'TD'
     * bit in the Control register-1 of the ATU outbound region acts
     * like an override for the ECRC setting, i.e., the presence of TLP
     * Digest (ECRC) in the outgoing TLPs is solely determined by this
     * bit. This is contrary to the PCIe spec which says that the
     * enablement of the ECRC is solely determined by the AER
     * registers.
     *
     * Because of this, even when the ECRC is enabled through AER
     * registers, the transactions going through ATU won't have TLP
     * Digest as there is no way the PCI core AER code could program
     * the TD bit which is specific to the DesignWare core.
     *
     * The best way to handle this scenario is to program the TD bit
     * always. It affects only the traffic from root port to downstream
     * devices.
     *
     * At this point,
     * When ECRC is enabled in AER registers, everything works normally
     * When ECRC is NOT enabled in AER registers, then,
     * on Root Port:- TLP Digest (DWord size) gets appended to each packet
     *                even though it is not required. Since downstream
     *                TLPs are mostly for configuration accesses and BAR
     *                accesses, they are not in the critical path and won't
     *                have much negative effect on the performance.
     * on End Point:- TLP Digest is received for some/all the packets coming
     *                from the root port. TLP Digest is ignored because,
     *                as per the PCIe Spec r5.0 v1.0 section 2.2.3
     *                "TLP Digest Rules", when an endpoint receives TLP
     *                Digest when its ECRC check functionality is disabled
     *                in AER registers, received TLP Digest is just ignored.
     * Since there is no issue or error reported on either side, the best
     * way to handle the scenario is to program the TD bit by default.
     */

    return val | PCIE_ATU_TD;
}

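/*
 * Example (illustrative sketch): the AER controls that the comment above
 * refers to are the ECRC generation/check enable bits in the Advanced Error
 * Capabilities and Control register. A check like the one below shows where
 * the spec expects ECRC to be turned on; the helper name is a placeholder and
 * nothing in this driver needs it, since on 4.90A the TD bit is simply set
 * unconditionally.
 */
static bool example_aer_ecrc_enabled(struct dw_pcie *pci)
{
    u32 val;
    u16 aer = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_ERR);

    if (!aer)
        return false;

    val = dw_pcie_readl_dbi(pci, aer + PCI_ERR_CAP);
    return val & (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
}
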
static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
                       int index, int type, u64 cpu_addr,
                       u64 pci_addr, u64 size)
{
    u32 retries, val;
    u64 limit_addr;

    if (pci->ops && pci->ops->cpu_addr_fixup)
        cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

    limit_addr = cpu_addr + size - 1;

    if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
        !IS_ALIGNED(cpu_addr, pci->region_align) ||
        !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
        return -EINVAL;
    }

    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
                  lower_32_bits(cpu_addr));
    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
                  upper_32_bits(cpu_addr));

    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
                  lower_32_bits(limit_addr));
    if (dw_pcie_ver_is_ge(pci, 460A))
        dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
                      upper_32_bits(limit_addr));

    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
                  lower_32_bits(pci_addr));
    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
                  upper_32_bits(pci_addr));

    val = type | PCIE_ATU_FUNC_NUM(func_no);
    if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
        dw_pcie_ver_is_ge(pci, 460A))
        val |= PCIE_ATU_INCREASE_REGION_SIZE;
    if (dw_pcie_ver_is(pci, 490A))
        val = dw_pcie_enable_ecrc(val);
    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);

    dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

    /*
     * Make sure ATU enable takes effect before any subsequent config
     * and I/O accesses.
     */
    for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
        val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
        if (val & PCIE_ATU_ENABLE)
            return 0;

        mdelay(LINK_WAIT_IATU);
    }

    dev_err(pci->dev, "Outbound iATU is not being enabled\n");

    return -ETIMEDOUT;
}

int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                  u64 cpu_addr, u64 pci_addr, u64 size)
{
    return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
                       cpu_addr, pci_addr, size);
}

int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                 int type, u64 cpu_addr, u64 pci_addr,
                 u64 size)
{
    return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
                       cpu_addr, pci_addr, size);
}

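/*
 * Example (illustrative sketch): programming a single outbound MEM window.
 * The host side normally derives these windows from the bridge's resource
 * list (the DT "ranges" property) during dw_pcie_host_init(); the index,
 * addresses and size below are arbitrary placeholders, not values taken
 * from any real platform.
 */
static int example_map_mem_window(struct dw_pcie *pci)
{
    return dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_MEM,
                     0x40000000,    /* CPU address */
                     0x40000000,    /* PCI bus address */
                     SZ_16M);       /* window size */
}
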
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
    return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}

static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
                     u32 val)
{
    dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                 int type, u64 cpu_addr, u8 bar)
{
    u32 retries, val;

    if (!IS_ALIGNED(cpu_addr, pci->region_align))
        return -EINVAL;

    dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
                  lower_32_bits(cpu_addr));
    dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
                  upper_32_bits(cpu_addr));

    dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
                  PCIE_ATU_FUNC_NUM(func_no));
    dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
                  PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
                  PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

    /*
     * Make sure ATU enable takes effect before any subsequent config
     * and I/O accesses.
     */
    for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
        val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
        if (val & PCIE_ATU_ENABLE)
            return 0;

        mdelay(LINK_WAIT_IATU);
    }

    dev_err(pci->dev, "Inbound iATU is not being enabled\n");

    return -ETIMEDOUT;
}

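/*
 * Example (illustrative sketch): the endpoint library maps a BAR onto local
 * memory with a BAR-match inbound window, roughly as below (this is along
 * the lines of what dw_pcie_ep_set_bar() ends up doing through its
 * inbound-ATU helper). The function number, window index and address are
 * placeholders.
 */
static int example_map_bar0(struct dw_pcie *pci, u64 ep_mem_phys)
{
    return dw_pcie_prog_inbound_atu(pci, 0, 0, PCIE_ATU_TYPE_MEM,
                    ep_mem_phys, 0 /* BAR 0 */);
}
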
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
    dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
    u32 offset, val;
    int retries;

    /* Check if the link is up or not */
    for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
        if (dw_pcie_link_up(pci))
            break;

        usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
    }

    if (retries >= LINK_WAIT_MAX_RETRIES) {
        dev_err(pci->dev, "Phy link never came up\n");
        return -ETIMEDOUT;
    }

    offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
    val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);

    dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
         FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
         FIELD_GET(PCI_EXP_LNKSTA_NLW, val));

    return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);

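/*
 * Example (illustrative sketch): the usual bring-up sequence in a glue driver
 * is to start link training and then poll with the helper above.
 * dw_pcie_start_link() is the small wrapper in pcie-designware.h around the
 * glue driver's ->start_link() callback. dw_pcie_host_init() treats a link
 * that never comes up as non-fatal; it is treated as fatal here only for
 * brevity.
 */
static int example_bring_up_link(struct dw_pcie *pci)
{
    int ret;

    ret = dw_pcie_start_link(pci);
    if (ret)
        return ret;

    return dw_pcie_wait_for_link(pci);
}
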
int dw_pcie_link_up(struct dw_pcie *pci)
{
    u32 val;

    if (pci->ops && pci->ops->link_up)
        return pci->ops->link_up(pci);

    val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
    return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
        (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
EXPORT_SYMBOL_GPL(dw_pcie_link_up);

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
    u32 val;

    val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
    val |= PORT_MLTI_UPCFG_SUPPORT;
    dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
    u32 cap, ctrl2, link_speed;
    u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

    cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
    ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
    ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

    switch (pcie_link_speed[link_gen]) {
    case PCIE_SPEED_2_5GT:
        link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
        break;
    case PCIE_SPEED_5_0GT:
        link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
        break;
    case PCIE_SPEED_8_0GT:
        link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
        break;
    case PCIE_SPEED_16_0GT:
        link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
        break;
    default:
        /* Use hardware capability */
        link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
        ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
        break;
    }

    dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

    cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
    dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}

static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
    u32 val;

    val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
    if (val == 0xffffffff)
        return true;

    return false;
}

static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
    int max_region, ob, ib;
    u32 val, min, dir;
    u64 max;

    if (pci->iatu_unroll_enabled) {
        max_region = min((int)pci->atu_size / 512, 256);
    } else {
        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
        max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
    }

    /*
     * Count the usable outbound and inbound windows by writing a test
     * pattern to each region's lower target register and checking that
     * it reads back.
     */
    for (ob = 0; ob < max_region; ob++) {
        dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
        val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
        if (val != 0x11110000)
            break;
    }

    for (ib = 0; ib < max_region; ib++) {
        dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
        val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
        if (val != 0x11110000)
            break;
    }

    if (ob) {
        dir = PCIE_ATU_REGION_DIR_OB;
    } else if (ib) {
        dir = PCIE_ATU_REGION_DIR_IB;
    } else {
        dev_err(pci->dev, "No iATU regions found\n");
        return;
    }

    /*
     * The low bits of the limit register are hardwired, so writing zero
     * and reading back reveals the minimum region size (alignment).
     * Likewise, writing all-ones to the upper limit register reveals how
     * many upper address bits are implemented.
     */
    dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
    min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);

    if (dw_pcie_ver_is_ge(pci, 460A)) {
        dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
        max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
    } else {
        max = 0;
    }

    pci->num_ob_windows = ob;
    pci->num_ib_windows = ib;
    pci->region_align = 1 << fls(min);
    pci->region_limit = (max << 32) | (SZ_4G - 1);
}

0587 
0588 void dw_pcie_iatu_detect(struct dw_pcie *pci)
0589 {
0590     struct platform_device *pdev = to_platform_device(pci->dev);
0591 
0592     pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
0593     if (pci->iatu_unroll_enabled) {
0594         if (!pci->atu_base) {
0595             struct resource *res =
0596                 platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
0597             if (res) {
0598                 pci->atu_size = resource_size(res);
0599                 pci->atu_base = devm_ioremap_resource(pci->dev, res);
0600             }
0601             if (!pci->atu_base || IS_ERR(pci->atu_base))
0602                 pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
0603         }
0604 
0605         if (!pci->atu_size)
0606             /* Pick a minimal default, enough for 8 in and 8 out windows */
0607             pci->atu_size = SZ_4K;
0608     } else {
0609         pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
0610         pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
0611     }
0612 
0613     dw_pcie_iatu_detect_regions(pci);
0614 
0615     dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
0616         "enabled" : "disabled");
0617 
0618     dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
0619          pci->num_ob_windows, pci->num_ib_windows,
0620          pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
0621 }
0622 
void dw_pcie_setup(struct dw_pcie *pci)
{
    struct device_node *np = pci->dev->of_node;
    u32 val;

    if (pci->link_gen > 0)
        dw_pcie_link_set_max_speed(pci, pci->link_gen);

    /* Configure Gen1 N_FTS */
    if (pci->n_fts[0]) {
        val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
        val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
        val |= PORT_AFR_N_FTS(pci->n_fts[0]);
        val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
        dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
    }

    /* Configure Gen2+ N_FTS */
    if (pci->n_fts[1]) {
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_N_FTS_MASK;
        val |= pci->n_fts[pci->link_gen - 1];
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
    }

    val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
    val &= ~PORT_LINK_FAST_LINK_MODE;
    val |= PORT_LINK_DLL_LINK_EN;
    dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

    if (of_property_read_bool(np, "snps,enable-cdm-check")) {
        val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
        val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
               PCIE_PL_CHK_REG_CHK_REG_START;
        dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
    }

    of_property_read_u32(np, "num-lanes", &pci->num_lanes);
    if (!pci->num_lanes) {
        dev_dbg(pci->dev, "Using h/w default number of lanes\n");
        return;
    }

    /* Set the number of lanes */
    val &= ~PORT_LINK_FAST_LINK_MODE;
    val &= ~PORT_LINK_MODE_MASK;
    switch (pci->num_lanes) {
    case 1:
        val |= PORT_LINK_MODE_1_LANES;
        break;
    case 2:
        val |= PORT_LINK_MODE_2_LANES;
        break;
    case 4:
        val |= PORT_LINK_MODE_4_LANES;
        break;
    case 8:
        val |= PORT_LINK_MODE_8_LANES;
        break;
    default:
        dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
        return;
    }
    dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

    /* Set link width speed control register */
    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
    val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
    switch (pci->num_lanes) {
    case 1:
        val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
        break;
    case 2:
        val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
        break;
    case 4:
        val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
        break;
    case 8:
        val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
        break;
    }
    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
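
/*
 * Example (illustrative sketch): the knobs consumed above. A glue driver
 * fills these in (typically from DT properties or SoC data) before the
 * host/endpoint core calls dw_pcie_setup(); zero values keep the hardware
 * defaults. The numbers below are arbitrary placeholders.
 */
static void example_tune_link(struct dw_pcie *pci)
{
    pci->link_gen = 3;      /* cap link training at Gen3 */
    pci->n_fts[0] = 0x53;   /* Gen1 N_FTS, placeholder */
    pci->n_fts[1] = 0x53;   /* Gen2+ N_FTS, placeholder */
}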