// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"

/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG                    0x0
#define PCIE_CORE_CMD_STATUS_REG                0x4
#define PCIE_CORE_DEV_REV_REG                   0x8
#define PCIE_CORE_PCIEXP_CAP                    0xc0
#define PCIE_CORE_PCIERR_CAP                    0x100
#define PCIE_CORE_ERR_CAPCTL_REG                0x118
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX            BIT(5)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN         BIT(6)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK          BIT(7)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV          BIT(8)
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR               0x4000
#define PIO_CTRL                (PIO_BASE_ADDR + 0x0)
#define   PIO_CTRL_TYPE_MASK            GENMASK(3, 0)
#define   PIO_CTRL_ADDR_WIN_DISABLE     BIT(24)
#define PIO_STAT                (PIO_BASE_ADDR + 0x4)
#define   PIO_COMPLETION_STATUS_SHIFT       7
#define   PIO_COMPLETION_STATUS_MASK        GENMASK(9, 7)
#define   PIO_COMPLETION_STATUS_OK      0
#define   PIO_COMPLETION_STATUS_UR      1
#define   PIO_COMPLETION_STATUS_CRS     2
#define   PIO_COMPLETION_STATUS_CA      4
#define   PIO_NON_POSTED_REQ            BIT(10)
#define   PIO_ERR_STATUS            BIT(11)
#define PIO_ADDR_LS             (PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS             (PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA             (PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB            (PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA             (PIO_BASE_ADDR + 0x18)
#define PIO_START               (PIO_BASE_ADDR + 0x1c)
#define PIO_ISR                 (PIO_BASE_ADDR + 0x20)
#define PIO_ISRM                (PIO_BASE_ADDR + 0x24)

/* Aardvark Control registers */
#define CONTROL_BASE_ADDR           0x4800
#define PCIE_CORE_CTRL0_REG         (CONTROL_BASE_ADDR + 0x0)
#define     PCIE_GEN_SEL_MSK            0x3
#define     PCIE_GEN_SEL_SHIFT          0x0
#define     SPEED_GEN_1             0
#define     SPEED_GEN_2             1
#define     SPEED_GEN_3             2
#define     IS_RC_MSK               1
#define     IS_RC_SHIFT             2
#define     LANE_CNT_MSK            0x18
#define     LANE_CNT_SHIFT          0x3
#define     LANE_COUNT_1            (0 << LANE_CNT_SHIFT)
#define     LANE_COUNT_2            (1 << LANE_CNT_SHIFT)
#define     LANE_COUNT_4            (2 << LANE_CNT_SHIFT)
#define     LANE_COUNT_8            (3 << LANE_CNT_SHIFT)
#define     LINK_TRAINING_EN            BIT(6)
#define     LEGACY_INTA             BIT(28)
#define     LEGACY_INTB             BIT(29)
#define     LEGACY_INTC             BIT(30)
#define     LEGACY_INTD             BIT(31)
#define PCIE_CORE_CTRL1_REG         (CONTROL_BASE_ADDR + 0x4)
#define     HOT_RESET_GEN           BIT(0)
#define PCIE_CORE_CTRL2_REG         (CONTROL_BASE_ADDR + 0x8)
#define     PCIE_CORE_CTRL2_RESERVED        0x7
#define     PCIE_CORE_CTRL2_TD_ENABLE       BIT(4)
#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE   BIT(6)
#define     PCIE_CORE_CTRL2_MSI_ENABLE      BIT(10)
#define PCIE_CORE_REF_CLK_REG           (CONTROL_BASE_ADDR + 0x14)
#define     PCIE_CORE_REF_CLK_TX_ENABLE     BIT(1)
#define     PCIE_CORE_REF_CLK_RX_ENABLE     BIT(2)
#define PCIE_MSG_LOG_REG            (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG               (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK            BIT(7)
#define PCIE_ISR0_MASK_REG          (CONTROL_BASE_ADDR + 0x44)
#define     PCIE_ISR0_MSI_INT_PENDING       BIT(24)
#define     PCIE_ISR0_CORR_ERR          BIT(11)
#define     PCIE_ISR0_NFAT_ERR          BIT(12)
#define     PCIE_ISR0_FAT_ERR           BIT(13)
#define     PCIE_ISR0_ERR_MASK          GENMASK(13, 11)
#define     PCIE_ISR0_INTX_ASSERT(val)      BIT(16 + (val))
#define     PCIE_ISR0_INTX_DEASSERT(val)    BIT(20 + (val))
#define     PCIE_ISR0_ALL_MASK          GENMASK(31, 0)
#define PCIE_ISR1_REG               (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG          (CONTROL_BASE_ADDR + 0x4C)
#define     PCIE_ISR1_POWER_STATE_CHANGE    BIT(4)
#define     PCIE_ISR1_FLUSH         BIT(5)
#define     PCIE_ISR1_INTX_ASSERT(val)      BIT(8 + (val))
#define     PCIE_ISR1_ALL_MASK          GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG           (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG          (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG         (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG           (CONTROL_BASE_ADDR + 0x5C)
#define     PCIE_MSI_ALL_MASK           GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG            (CONTROL_BASE_ADDR + 0x9C)
#define     PCIE_MSI_DATA_MASK          GENMASK(15, 0)

/* PCIe window configuration */
#define OB_WIN_BASE_ADDR            0x4c00
#define OB_WIN_BLOCK_SIZE           0x20
#define OB_WIN_COUNT                8
#define OB_WIN_REG_ADDR(win, offset)        (OB_WIN_BASE_ADDR + \
                         OB_WIN_BLOCK_SIZE * (win) + \
                         (offset))
#define OB_WIN_MATCH_LS(win)            OB_WIN_REG_ADDR(win, 0x00)
#define     OB_WIN_ENABLE           BIT(0)
#define OB_WIN_MATCH_MS(win)            OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win)            OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win)            OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win)         OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win)         OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win)         OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS          (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
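/*
 * For example, OB_WIN_MATCH_LS(3) expands to 0x4c00 + 0x20 * 3 + 0x00 =
 * 0x4c60, and OB_WIN_DEFAULT_ACTIONS to OB_WIN_ACTIONS(7) + 0x4 = 0x4cfc.
 */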
#define     OB_WIN_FUNC_NUM_MASK        GENMASK(31, 24)
#define     OB_WIN_FUNC_NUM_SHIFT       24
#define     OB_WIN_FUNC_NUM_ENABLE      BIT(23)
#define     OB_WIN_BUS_NUM_BITS_MASK        GENMASK(22, 20)
#define     OB_WIN_BUS_NUM_BITS_SHIFT       20
#define     OB_WIN_MSG_CODE_ENABLE      BIT(22)
#define     OB_WIN_MSG_CODE_MASK        GENMASK(21, 14)
#define     OB_WIN_MSG_CODE_SHIFT       14
#define     OB_WIN_MSG_PAYLOAD_LEN      BIT(12)
#define     OB_WIN_ATTR_ENABLE          BIT(11)
#define     OB_WIN_ATTR_TC_MASK         GENMASK(10, 8)
#define     OB_WIN_ATTR_TC_SHIFT        8
#define     OB_WIN_ATTR_RELAXED         BIT(7)
#define     OB_WIN_ATTR_NOSNOOP         BIT(6)
#define     OB_WIN_ATTR_POISON          BIT(5)
#define     OB_WIN_ATTR_IDO         BIT(4)
#define     OB_WIN_TYPE_MASK            GENMASK(3, 0)
#define     OB_WIN_TYPE_SHIFT           0
#define     OB_WIN_TYPE_MEM         0x0
#define     OB_WIN_TYPE_IO          0x4
#define     OB_WIN_TYPE_CONFIG_TYPE0        0x8
#define     OB_WIN_TYPE_CONFIG_TYPE1        0x9
#define     OB_WIN_TYPE_MSG         0xc

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR               0x6000
#define CFG_REG                 (LMI_BASE_ADDR + 0x0)
#define     LTSSM_SHIFT             24
#define     LTSSM_MASK              0x3f
#define     RC_BAR_CONFIG           0x300

/* LTSSM values in CFG_REG */
enum {
    LTSSM_DETECT_QUIET          = 0x0,
    LTSSM_DETECT_ACTIVE         = 0x1,
    LTSSM_POLLING_ACTIVE            = 0x2,
    LTSSM_POLLING_COMPLIANCE        = 0x3,
    LTSSM_POLLING_CONFIGURATION     = 0x4,
    LTSSM_CONFIG_LINKWIDTH_START        = 0x5,
    LTSSM_CONFIG_LINKWIDTH_ACCEPT       = 0x6,
    LTSSM_CONFIG_LANENUM_ACCEPT     = 0x7,
    LTSSM_CONFIG_LANENUM_WAIT       = 0x8,
    LTSSM_CONFIG_COMPLETE           = 0x9,
    LTSSM_CONFIG_IDLE           = 0xa,
    LTSSM_RECOVERY_RCVR_LOCK        = 0xb,
    LTSSM_RECOVERY_SPEED            = 0xc,
    LTSSM_RECOVERY_RCVR_CFG         = 0xd,
    LTSSM_RECOVERY_IDLE         = 0xe,
    LTSSM_L0                = 0x10,
    LTSSM_RX_L0S_ENTRY          = 0x11,
    LTSSM_RX_L0S_IDLE           = 0x12,
    LTSSM_RX_L0S_FTS            = 0x13,
    LTSSM_TX_L0S_ENTRY          = 0x14,
    LTSSM_TX_L0S_IDLE           = 0x15,
    LTSSM_TX_L0S_FTS            = 0x16,
    LTSSM_L1_ENTRY              = 0x17,
    LTSSM_L1_IDLE               = 0x18,
    LTSSM_L2_IDLE               = 0x19,
    LTSSM_L2_TRANSMIT_WAKE          = 0x1a,
    LTSSM_DISABLED              = 0x20,
    LTSSM_LOOPBACK_ENTRY_MASTER     = 0x21,
    LTSSM_LOOPBACK_ACTIVE_MASTER        = 0x22,
    LTSSM_LOOPBACK_EXIT_MASTER      = 0x23,
    LTSSM_LOOPBACK_ENTRY_SLAVE      = 0x24,
    LTSSM_LOOPBACK_ACTIVE_SLAVE     = 0x25,
    LTSSM_LOOPBACK_EXIT_SLAVE       = 0x26,
    LTSSM_HOT_RESET             = 0x27,
    LTSSM_RECOVERY_EQUALIZATION_PHASE0  = 0x28,
    LTSSM_RECOVERY_EQUALIZATION_PHASE1  = 0x29,
    LTSSM_RECOVERY_EQUALIZATION_PHASE2  = 0x2a,
    LTSSM_RECOVERY_EQUALIZATION_PHASE3  = 0x2b,
};

#define VENDOR_ID_REG               (LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR         0x18000
#define CTRL_CONFIG_REG             (CTRL_CORE_BASE_ADDR + 0x0)
#define     CTRL_MODE_SHIFT         0x0
#define     CTRL_MODE_MASK          0x1
#define     PCIE_CORE_MODE_DIRECT       0x0
#define     PCIE_CORE_MODE_COMMAND      0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR           0x1b000
#define HOST_CTRL_INT_STATUS_REG        (CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG          (CENTRAL_INT_BASE_ADDR + 0x4)
#define     PCIE_IRQ_CMDQ_INT           BIT(0)
#define     PCIE_IRQ_MSI_STATUS_INT     BIT(1)
#define     PCIE_IRQ_CMD_SENT_DONE      BIT(3)
#define     PCIE_IRQ_DMA_INT            BIT(4)
#define     PCIE_IRQ_IB_DXFERDONE       BIT(5)
#define     PCIE_IRQ_OB_DXFERDONE       BIT(6)
#define     PCIE_IRQ_OB_RXFERDONE       BIT(7)
#define     PCIE_IRQ_COMPQ_INT          BIT(12)
#define     PCIE_IRQ_DIR_RD_DDR_DET     BIT(13)
#define     PCIE_IRQ_DIR_WR_DDR_DET     BIT(14)
#define     PCIE_IRQ_CORE_INT           BIT(16)
#define     PCIE_IRQ_CORE_INT_PIO       BIT(17)
#define     PCIE_IRQ_DPMU_INT           BIT(18)
#define     PCIE_IRQ_PCIE_MIS_INT       BIT(19)
#define     PCIE_IRQ_MSI_INT1_DET       BIT(20)
#define     PCIE_IRQ_MSI_INT2_DET       BIT(21)
#define     PCIE_IRQ_RC_DBELL_DET       BIT(22)
#define     PCIE_IRQ_EP_STATUS          BIT(23)
#define     PCIE_IRQ_ALL_MASK           GENMASK(31, 0)
#define     PCIE_IRQ_ENABLE_INTS_MASK       PCIE_IRQ_CORE_INT

/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0            0x8
#define PCIE_CONFIG_RD_TYPE1            0x9
#define PCIE_CONFIG_WR_TYPE0            0xa
#define PCIE_CONFIG_WR_TYPE1            0xb

#define PIO_RETRY_CNT           750000 /* 1.5 s */
#define PIO_RETRY_DELAY         2 /* 2 us */
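/* 750000 polls of 2 us each give the 1.5 s worst-case PIO timeout above */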

#define LINK_WAIT_MAX_RETRIES       10
#define LINK_WAIT_USLEEP_MIN        90000
#define LINK_WAIT_USLEEP_MAX        100000
#define RETRAIN_WAIT_MAX_RETRIES    10
#define RETRAIN_WAIT_USLEEP_US      2000

#define MSI_IRQ_NUM         32

#define CFG_RD_CRS_VAL          0xffff0001

struct advk_pcie {
    struct platform_device *pdev;
    void __iomem *base;
    struct {
        phys_addr_t match;
        phys_addr_t remap;
        phys_addr_t mask;
        u32 actions;
    } wins[OB_WIN_COUNT];
    u8 wins_count;
    struct irq_domain *rp_irq_domain;
    struct irq_domain *irq_domain;
    struct irq_chip irq_chip;
    raw_spinlock_t irq_lock;
    struct irq_domain *msi_domain;
    struct irq_domain *msi_inner_domain;
    raw_spinlock_t msi_irq_lock;
    DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
    struct mutex msi_used_lock;
    int link_gen;
    struct pci_bridge_emul bridge;
    struct gpio_desc *reset_gpio;
    struct phy *phy;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
    writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
    return readl(pcie->base + reg);
}

static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
    u32 val;
    u8 ltssm_state;

    val = advk_readl(pcie, CFG_REG);
    ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
    return ltssm_state;
}

static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
    /* check if LTSSM is in normal operation - some L* state */
    u8 ltssm_state = advk_pcie_ltssm_state(pcie);
    return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
{
    /*
     * According to PCIe Base specification 3.0, Table 4-14 (Link
     * Status Mapped to the LTSSM) and section 4.2.6.3.6
     * (Configuration.Idle), Link Up is mapped to the LTSSM
     * Configuration.Idle, Recovery, L0, L0s, L1 and L2 states. And
     * according to section 3.2.1 (Data Link Control and Management
     * State Machine Rules), DL Up status is reported in the DL Active
     * state.
     */
    u8 ltssm_state = advk_pcie_ltssm_state(pcie);
    return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
    /*
     * According to PCIe Base specification 3.0, Table 4-14 (Link
     * Status Mapped to the LTSSM), Link Training is mapped to the
     * LTSSM Configuration and Recovery states.
     */
    u8 ltssm_state = advk_pcie_ltssm_state(pcie);
    return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
         ltssm_state < LTSSM_L0) ||
        (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
         ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
    int retries;

    /* check if the link is up or not */
    for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
        if (advk_pcie_link_up(pcie))
            return 0;

        usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
    }

    return -ETIMEDOUT;
}

static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
    size_t retries;

    for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
        if (advk_pcie_link_training(pcie))
            break;
        udelay(RETRAIN_WAIT_USLEEP_US);
    }
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
    if (!pcie->reset_gpio)
        return;

    /* 10ms delay is needed for some cards */
    dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
    gpiod_set_value_cansleep(pcie->reset_gpio, 1);
    usleep_range(10000, 11000);
    gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
    struct device *dev = &pcie->pdev->dev;
    u32 reg;
    int ret;

    /*
     * Setup PCIe rev / gen compliance based on the device tree property
     * 'max-link-speed', which also forces the maximal link speed.
     */
    reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
    reg &= ~PCIE_GEN_SEL_MSK;
    if (pcie->link_gen == 3)
        reg |= SPEED_GEN_3;
    else if (pcie->link_gen == 2)
        reg |= SPEED_GEN_2;
    else
        reg |= SPEED_GEN_1;
    advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

    /*
     * Set the maximal link speed value also into the PCIe Link Control 2
     * register. The Armada 3700 Functional Specification says that the
     * default value is based on SPEED_GEN, but tests showed that the
     * default value is always 8.0 GT/s.
     */
    reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
    reg &= ~PCI_EXP_LNKCTL2_TLS;
    if (pcie->link_gen == 3)
        reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
    else if (pcie->link_gen == 2)
        reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
    else
        reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
    advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

    /* Enable link training after selecting PCIe generation */
    reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
    reg |= LINK_TRAINING_EN;
    advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

    /*
     * Reset the PCIe card via the PERST# signal. Some cards are not
     * detected during link training when they are in some non-initial
     * state.
     */
    advk_pcie_issue_perst(pcie);

    /*
     * The PERST# signal could have been asserted by the pinctrl subsystem
     * before the probe() callback has been called, or issued explicitly by
     * the reset GPIO function advk_pcie_issue_perst(), putting the
     * endpoint into fundamental reset. As required by the PCI Express spec
     * (PCI Express Base Specification, REV. 4.0 PCI Express, February 19
     * 2014, 6.6.1 Conventional Reset), a delay of at least 100ms is needed
     * after such a reset before a Configuration Request may be sent to the
     * device. So wait until the PCIe link is up. advk_pcie_wait_for_link()
     * waits for the link for at least 900ms.
     */
    ret = advk_pcie_wait_for_link(pcie);
    if (ret < 0)
        dev_err(dev, "link never came up\n");
    else
        dev_info(dev, "link up\n");
}

/*
 * Set up a PCIe outbound address window, which can be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
                 phys_addr_t match, phys_addr_t remap,
                 phys_addr_t mask, u32 actions)
{
    advk_writel(pcie, OB_WIN_ENABLE |
              lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
    advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
    advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
    advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
    advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
    advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
    advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}
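
/*
 * A hypothetical example (not taken from the probe path, which is outside
 * this excerpt): routing a 16 MiB CPU region at 0xe8000000 1:1 to PCIe I/O
 * space, with the mask encoding the window size as ~(size - 1), might look
 * like:
 *
 *     advk_pcie_set_ob_win(pcie, 0, 0xe8000000, 0xe8000000,
 *                          ~(phys_addr_t)(SZ_16M - 1), OB_WIN_TYPE_IO);
 */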

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
    advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
    advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
    advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
    advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
    advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
    advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
    advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
    phys_addr_t msi_addr;
    u32 reg;
    int i;

    /*
     * Configure the PCIe Reference clock. The direction is from the
     * PCIe controller to the endpoint card, so enable transmitting of
     * the Reference clock differential signal off-chip and disable
     * receiving of the off-chip differential signal.
     */
    reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
    reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
    reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
    advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

    /* Set to Direct mode */
    reg = advk_readl(pcie, CTRL_CONFIG_REG);
    reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
    reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
    advk_writel(pcie, reg, CTRL_CONFIG_REG);

    /* Set PCI global control register to RC mode */
    reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
    reg |= (IS_RC_MSK << IS_RC_SHIFT);
    advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

    /*
     * Replace the incorrect PCI vendor id value 0x1b4b by the correct
     * value 0x11ab. VENDOR_ID_REG contains the vendor id in its low 16
     * bits and the subsystem vendor id in its high 16 bits. Updating
     * this register changes the readback value of the read-only vendor
     * id bits in the PCIE_CORE_DEV_ID_REG register. This is a workaround
     * for erratum 4.1: "The value of device and vendor ID is incorrect".
     */
    reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
    advk_writel(pcie, reg, VENDOR_ID_REG);

    /*
     * Change the Class Code of the PCI Bridge device to PCI Bridge
     * (0x600400), because the default value is Mass storage controller
     * (0x010400).
     *
     * Note that this Aardvark PCI Bridge does not have a compliant Type 1
     * Configuration Space and it cannot even be accessed via Aardvark's
     * PCI config space access method. Something like config space is
     * available in internal Aardvark registers starting at offset 0x0
     * and is reported as Type 0. In the range 0x10 - 0x34 it has totally
     * different registers.
     *
     * Therefore the driver uses an emulated PCI Bridge which provides
     * access to configuration space via internal Aardvark registers or
     * an emulated configuration buffer.
     */
    reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
    reg &= ~0xffffff00;
    reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
    advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

    /* Disable Root Bridge I/O space, memory space and bus mastering */
    reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
    reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
    advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

    /* Set Advanced Error Capabilities and Control PF0 register */
    reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
        PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
        PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
        PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
    advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

    /* Set PCIe Device Control register */
    reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
    reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
    reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
    reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
    reg &= ~PCI_EXP_DEVCTL_READRQ;
    reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
    reg |= PCI_EXP_DEVCTL_READRQ_512B;
    advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

    /* Program PCIe Control 2 to disable strict ordering */
    reg = PCIE_CORE_CTRL2_RESERVED |
        PCIE_CORE_CTRL2_TD_ENABLE;
    advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

    /* Set lane X1 */
    reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
    reg &= ~LANE_CNT_MSK;
    reg |= LANE_COUNT_1;
    advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

    /* Set MSI address */
    msi_addr = virt_to_phys(pcie);
    advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
    advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
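    /*
     * Note: the doorbell only needs to be a unique, otherwise unused
     * physical address that inbound MSI memory writes are matched
     * against; the structure behind virt_to_phys(pcie) is never
     * actually written by the hardware.
     */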

    /* Enable MSI */
    reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
    reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
    advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

    /* Clear all interrupts */
    advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
    advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
    advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
    advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

    /* Disable All ISR0/1 and MSI Sources */
    advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
    advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
    advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

    /* Unmask summary MSI interrupt */
    reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
    reg &= ~PCIE_ISR0_MSI_INT_PENDING;
    advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

    /* Unmask PME interrupt for processing of PME requester */
    reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
    reg &= ~PCIE_MSG_PM_PME_MASK;
    advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

    /* Enable summary interrupt for GIC SPI source */
    reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
    advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

    /*
     * Enable AXI address window location generation:
     * When it is enabled, the default outbound window
     * configuration (Default User Field: 0xD0074CFC)
     * is used for transparent address translation of
     * outbound transactions. Thus, PCIe address
     * windows are not required for transparent memory
     * access when the default outbound window
     * configuration is set for memory access.
     */
    reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
    reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
    advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

    /*
     * Set memory access in the Default User Field so
     * that no PCIe address window needs to be
     * configured for transparent memory access.
     */
    advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

    /*
     * Bypass the address window mapping for PIO:
     * Since a PIO access already carries all required
     * info over the AXI interface in the PIO registers,
     * an address window is not required.
     */
    reg = advk_readl(pcie, PIO_CTRL);
    reg |= PIO_CTRL_ADDR_WIN_DISABLE;
    advk_writel(pcie, reg, PIO_CTRL);

    /*
     * Configure PCIe address windows for non-memory or
     * non-transparent access, as by default PCIe uses
     * transparent memory access.
     */
    for (i = 0; i < pcie->wins_count; i++)
        advk_pcie_set_ob_win(pcie, i,
                     pcie->wins[i].match, pcie->wins[i].remap,
                     pcie->wins[i].mask, pcie->wins[i].actions);

    /* Disable remaining PCIe outbound windows */
    for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
        advk_pcie_disable_ob_win(pcie, i);

    advk_pcie_train_link(pcie);
}

static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
{
    struct device *dev = &pcie->pdev->dev;
    u32 reg;
    unsigned int status;
    char *strcomp_status, *str_posted;
    int ret;

    reg = advk_readl(pcie, PIO_STAT);
    status = (reg & PIO_COMPLETION_STATUS_MASK) >>
        PIO_COMPLETION_STATUS_SHIFT;

    /*
     * According to the HW spec, the PIO status check sequence is as
     * follows:
     * 1) even if COMPLETION_STATUS (bits 9:7) indicates success, the
     *    Error Status bit (bit 11) still needs to be checked; the
     *    operation is successful only when this bit indicates that no
     *    error happened.
     * 2) the value Unsupported Request (1) of COMPLETION_STATUS (bits
     *    9:7) only means an error for a PIO write; a PIO read is
     *    successful with a read value of 0xFFFFFFFF.
     * 3) the value Completion Retry Status (CRS) of COMPLETION_STATUS
     *    (bits 9:7) only means an error for a PIO write; a PIO read is
     *    successful with a read value of 0xFFFF0001.
     * 4) the value Completer Abort (CA) of COMPLETION_STATUS (bits 9:7)
     *    means an error for both PIO read and PIO write operations.
     * 5) other errors are indicated as 'unknown'.
     */
    switch (status) {
    case PIO_COMPLETION_STATUS_OK:
        if (reg & PIO_ERR_STATUS) {
            strcomp_status = "COMP_ERR";
            ret = -EFAULT;
            break;
        }
        /* Get the read result */
        if (val)
            *val = advk_readl(pcie, PIO_RD_DATA);
        /* No error */
        strcomp_status = NULL;
        ret = 0;
        break;
    case PIO_COMPLETION_STATUS_UR:
        strcomp_status = "UR";
        ret = -EOPNOTSUPP;
        break;
    case PIO_COMPLETION_STATUS_CRS:
        if (allow_crs && val) {
            /* PCIe r4.0, sec 2.3.2, says:
             * If CRS Software Visibility is enabled:
             * For a Configuration Read Request that includes both
             * bytes of the Vendor ID field of a device Function's
             * Configuration Space Header, the Root Complex must
             * complete the Request to the host by returning a
             * read-data value of 0001h for the Vendor ID field and
             * all '1's for any additional bytes included in the
             * request.
             *
             * So CRS in this case is not an error status.
             */
            *val = CFG_RD_CRS_VAL;
            strcomp_status = NULL;
            ret = 0;
            break;
        }
        /* PCIe r4.0, sec 2.3.2, says:
         * If CRS Software Visibility is not enabled, the Root Complex
         * must re-issue the Configuration Request as a new Request.
         * If CRS Software Visibility is enabled: For a Configuration
         * Write Request or for any other Configuration Read Request,
         * the Root Complex must re-issue the Configuration Request as
         * a new Request.
         * A Root Complex implementation may choose to limit the number
         * of Configuration Request/CRS Completion Status loops before
         * determining that something is wrong with the target of the
         * Request and taking appropriate action, e.g., complete the
         * Request to the host as a failed transaction.
         *
         * So return -EAGAIN and the caller (the pci-aardvark.c driver)
         * will re-issue the request, up to PIO_RETRY_CNT retries.
         */
        strcomp_status = "CRS";
        ret = -EAGAIN;
        break;
    case PIO_COMPLETION_STATUS_CA:
        strcomp_status = "CA";
        ret = -ECANCELED;
        break;
    default:
        strcomp_status = "Unknown";
        ret = -EINVAL;
        break;
    }

    if (!strcomp_status)
        return ret;

    if (reg & PIO_NON_POSTED_REQ)
        str_posted = "Non-posted";
    else
        str_posted = "Posted";

    dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
        str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

    return ret;
}

static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
    struct device *dev = &pcie->pdev->dev;
    int i;

    for (i = 1; i <= PIO_RETRY_CNT; i++) {
        u32 start, isr;

        start = advk_readl(pcie, PIO_START);
        isr = advk_readl(pcie, PIO_ISR);
        if (!start && isr)
            return i;
        udelay(PIO_RETRY_DELAY);
    }

    dev_err(dev, "PIO read/write transfer timed out\n");
    return -ETIMEDOUT;
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                    int reg, u32 *value)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    case PCI_COMMAND:
        *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
        return PCI_BRIDGE_EMUL_HANDLED;

    case PCI_INTERRUPT_LINE: {
        /*
         * Of the whole 32-bit register, only two bits are read from
         * HW: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR. The
         * other bits are retrieved only from the emulated config
         * buffer.
         */
        __le32 *cfgspace = (__le32 *)&bridge->conf;
        u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
        if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
            val &= ~(PCI_BRIDGE_CTL_SERR << 16);
        else
            val |= PCI_BRIDGE_CTL_SERR << 16;
        if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
            val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
        else
            val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
        *value = val;
        return PCI_BRIDGE_EMUL_HANDLED;
    }

    default:
        return PCI_BRIDGE_EMUL_NOT_HANDLED;
    }
}

static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                     int reg, u32 old, u32 new, u32 mask)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    case PCI_COMMAND:
        advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
        break;

    case PCI_INTERRUPT_LINE:
        /*
         * According to Figure 6-3 (Pseudo Logic Diagram for Error
         * Message Controls) in the PCIe base specification, the SERR#
         * Enable bit in the Bridge Control register enables receiving
         * of ERR_* messages.
         */
        if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
            u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
            if (new & (PCI_BRIDGE_CTL_SERR << 16))
                val &= ~PCIE_ISR0_ERR_MASK;
            else
                val |= PCIE_ISR0_ERR_MASK;
            advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
        }
        if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
            u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
            if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
                val |= HOT_RESET_GEN;
            else
                val &= ~HOT_RESET_GEN;
            advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
        }
        break;

    default:
        break;
    }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
                    int reg, u32 *value)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    /*
     * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
     * also supported, but do not need to be handled here, because their
     * values are stored in the emulated config space buffer, and we read
     * them from there when needed.
     */

    case PCI_EXP_LNKCAP: {
        u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
        /*
         * The PCI_EXP_LNKCAP_DLLLARC bit is hardwired to 0 in aardvark
         * HW. But support for PCI_EXP_LNKSTA_DLLLA is emulated via the
         * LTSSM state, so explicitly set the PCI_EXP_LNKCAP_DLLLARC
         * flag.
         */
        val |= PCI_EXP_LNKCAP_DLLLARC;
        *value = val;
        return PCI_BRIDGE_EMUL_HANDLED;
    }

    case PCI_EXP_LNKCTL: {
        /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
        u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
            ~(PCI_EXP_LNKSTA_LT << 16);
        if (advk_pcie_link_training(pcie))
            val |= (PCI_EXP_LNKSTA_LT << 16);
        if (advk_pcie_link_active(pcie))
            val |= (PCI_EXP_LNKSTA_DLLLA << 16);
        *value = val;
        return PCI_BRIDGE_EMUL_HANDLED;
    }

    case PCI_EXP_DEVCAP:
    case PCI_EXP_DEVCTL:
    case PCI_EXP_DEVCAP2:
    case PCI_EXP_DEVCTL2:
    case PCI_EXP_LNKCAP2:
    case PCI_EXP_LNKCTL2:
        *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
        return PCI_BRIDGE_EMUL_HANDLED;

    default:
        return PCI_BRIDGE_EMUL_NOT_HANDLED;
    }
}

static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
                     int reg, u32 old, u32 new, u32 mask)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    case PCI_EXP_LNKCTL:
        advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
        if (new & PCI_EXP_LNKCTL_RL)
            advk_pcie_wait_for_retrain(pcie);
        break;

    case PCI_EXP_RTCTL: {
        u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
        /* Only emulation of PMEIE and CRSSVE bits is provided */
        rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
        bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
        break;
    }

    /*
     * PCI_EXP_RTSTA is also supported, but does not need to be handled
     * here, because its value is stored in the emulated config space
     * buffer, and we write it there when needed.
     */

    case PCI_EXP_DEVCTL:
    case PCI_EXP_DEVCTL2:
    case PCI_EXP_LNKCTL2:
        advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
        break;

    default:
        break;
    }
}

static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
                   int reg, u32 *value)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    case 0:
        *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);

        /*
         * The PCI_EXT_CAP_NEXT bits are set to offset 0x150, but the
         * Armada 3700 Functional Specification does not document
         * registers at those addresses.
         *
         * Thus we clear the PCI_EXT_CAP_NEXT bits to make the Advanced
         * Error Reporting Capability header the last Extended
         * Capability. If we obtain documentation for those registers
         * in the future, this can be changed.
         */
        *value &= 0x000fffff;
        return PCI_BRIDGE_EMUL_HANDLED;

    case PCI_ERR_UNCOR_STATUS:
    case PCI_ERR_UNCOR_MASK:
    case PCI_ERR_UNCOR_SEVER:
    case PCI_ERR_COR_STATUS:
    case PCI_ERR_COR_MASK:
    case PCI_ERR_CAP:
    case PCI_ERR_HEADER_LOG + 0:
    case PCI_ERR_HEADER_LOG + 4:
    case PCI_ERR_HEADER_LOG + 8:
    case PCI_ERR_HEADER_LOG + 12:
    case PCI_ERR_ROOT_COMMAND:
    case PCI_ERR_ROOT_STATUS:
    case PCI_ERR_ROOT_ERR_SRC:
        *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
        return PCI_BRIDGE_EMUL_HANDLED;

    default:
        return PCI_BRIDGE_EMUL_NOT_HANDLED;
    }
}

static void
advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
                    int reg, u32 old, u32 new, u32 mask)
{
    struct advk_pcie *pcie = bridge->data;

    switch (reg) {
    /* These are W1C registers, so clear other bits */
    case PCI_ERR_UNCOR_STATUS:
    case PCI_ERR_COR_STATUS:
    case PCI_ERR_ROOT_STATUS:
        new &= mask;
        fallthrough;

    case PCI_ERR_UNCOR_MASK:
    case PCI_ERR_UNCOR_SEVER:
    case PCI_ERR_COR_MASK:
    case PCI_ERR_CAP:
    case PCI_ERR_HEADER_LOG + 0:
    case PCI_ERR_HEADER_LOG + 4:
    case PCI_ERR_HEADER_LOG + 8:
    case PCI_ERR_HEADER_LOG + 12:
    case PCI_ERR_ROOT_COMMAND:
    case PCI_ERR_ROOT_ERR_SRC:
        advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
        break;

    default:
        break;
    }
}

static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
    .read_base = advk_pci_bridge_emul_base_conf_read,
    .write_base = advk_pci_bridge_emul_base_conf_write,
    .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
    .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
    .read_ext = advk_pci_bridge_emul_ext_conf_read,
    .write_ext = advk_pci_bridge_emul_ext_conf_write,
};

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
    struct pci_bridge_emul *bridge = &pcie->bridge;

    bridge->conf.vendor =
        cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
    bridge->conf.device =
        cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
    bridge->conf.class_revision =
        cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

    /* Support 32-bit I/O addressing */
    bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
    bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

    /* Support 64-bit prefetchable memory */
    bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
    bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

    /* Support interrupt A for MSI feature */
    bridge->conf.intpin = PCI_INTERRUPT_INTA;

    /*
     * Aardvark HW provides the PCIe Capability structure in version 2 and
     * indicates slot support, which is emulated.
     */
    bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);

    /*
     * Set the Presence Detect State bit permanently, since there is no
     * support for unplugging the card nor detecting whether it is
     * plugged. (If a platform exists in the future that supports it, via
     * a GPIO for example, it should be implemented via this bit.)
     *
     * Set the physical slot number to 1, since there is only one port and
     * the zero value is reserved for ports within the same silicon as the
     * Root Port, which is not our case.
     */
    bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
                               1));
    bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

    /* Indicate support for Completion Retry Status */
    bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);

    bridge->has_pcie = true;
    bridge->data = pcie;
    bridge->ops = &advk_pci_bridge_emul_ops;

    return pci_bridge_emul_init(bridge, 0);
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
                  int devfn)
{
    if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
        return false;

    /*
     * If the link goes down after we check for link-up, we have a problem:
     * if a PIO request is executed while link-down, the whole controller
     * gets stuck in a non-functional state, and even after the link comes
     * up again, PIO requests won't work anymore, and a reset of the whole
     * PCIe controller is needed. Therefore we need to prevent sending PIO
     * requests while the link is down.
     */
    if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
        return false;

    return true;
}

static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
    struct device *dev = &pcie->pdev->dev;

    /*
     * Trying to start a new PIO transfer when the previous one has not
     * completed causes an External Abort on the CPU, which results in a
     * kernel panic:
     *
     *     SError Interrupt on CPU0, code 0xbf000002 -- SError
     *     Kernel panic - not syncing: Asynchronous SError Interrupt
     *
     * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
     * by raw_spin_lock_irqsave() at the pci_lock_config() level to prevent
     * concurrent calls. But because a PIO transfer may take about 1.5s
     * when the link is down or the card is disconnected,
     * advk_pcie_wait_pio() does not always wait for completion.
     *
     * Some versions of ARM Trusted Firmware handle this External Abort at
     * EL3 level and mask it to prevent the kernel panic. Relevant TF-A
     * commit:
     * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
     */
    if (advk_readl(pcie, PIO_START)) {
        dev_err(dev, "Previous PIO read/write transfer is still running\n");
        return true;
    }

    return false;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
                 int where, int size, u32 *val)
{
    struct advk_pcie *pcie = bus->sysdata;
    int retry_count;
    bool allow_crs;
    u32 reg;
    int ret;

    if (!advk_pcie_valid_device(pcie, bus, devfn))
        return PCIBIOS_DEVICE_NOT_FOUND;

    if (pci_is_root_bus(bus))
        return pci_bridge_emul_conf_read(&pcie->bridge, where,
                         size, val);

    /*
     * Completion Retry Status can be returned only when reading all 4
     * bytes from the PCI_VENDOR_ID and PCI_DEVICE_ID registers at once
     * and the CRSSVE flag on the Root Bridge is enabled.
     */
    allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
            (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
             PCI_EXP_RTCTL_CRSSVE);

    if (advk_pcie_pio_is_running(pcie))
        goto try_crs;

    /* Program the control register */
    reg = advk_readl(pcie, PIO_CTRL);
    reg &= ~PIO_CTRL_TYPE_MASK;
    if (pci_is_root_bus(bus->parent))
        reg |= PCIE_CONFIG_RD_TYPE0;
    else
        reg |= PCIE_CONFIG_RD_TYPE1;
    advk_writel(pcie, reg, PIO_CTRL);

    /* Program the address registers */
    reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
    advk_writel(pcie, reg, PIO_ADDR_LS);
    advk_writel(pcie, 0, PIO_ADDR_MS);
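    /*
     * PCIE_ECAM_OFFSET() packs bus << 20 | devfn << 12 | where, so e.g.
     * a read at bus 1, devfn 0, where 0x10 programs PIO_ADDR_LS with
     * 0x100010 (already dword-aligned, so ALIGN_DOWN leaves it as is).
     */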

    /* Program the data strobe */
    advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

    retry_count = 0;
    do {
        /* Clear PIO DONE ISR and start the transfer */
        advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);

        ret = advk_pcie_wait_pio(pcie);
        if (ret < 0)
            goto try_crs;

        retry_count += ret;

        /* Check PIO status and get the read result */
        ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
    } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

    if (ret < 0)
        goto fail;

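    /*
     * Extract the requested bytes from the dword that was read. For
     * example, a 2-byte read at where == 0x6 takes the dword read from
     * aligned offset 0x4, shifts it right by 16 bits and masks it to
     * 0xffff.
     */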
    if (size == 1)
        *val = (*val >> (8 * (where & 3))) & 0xff;
    else if (size == 2)
        *val = (*val >> (8 * (where & 3))) & 0xffff;

    return PCIBIOS_SUCCESSFUL;

try_crs:
    /*
     * If it is possible, return Completion Retry Status so that the
     * caller tries to issue the request again instead of failing.
     */
    if (allow_crs) {
        *val = CFG_RD_CRS_VAL;
        return PCIBIOS_SUCCESSFUL;
    }

fail:
    *val = 0xffffffff;
    return PCIBIOS_SET_FAILED;
}

static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                int where, int size, u32 val)
{
    struct advk_pcie *pcie = bus->sysdata;
    u32 reg;
    u32 data_strobe = 0x0;
    int retry_count;
    int offset;
    int ret;

    if (!advk_pcie_valid_device(pcie, bus, devfn))
        return PCIBIOS_DEVICE_NOT_FOUND;

    if (pci_is_root_bus(bus))
        return pci_bridge_emul_conf_write(&pcie->bridge, where,
                          size, val);

    if (where % size)
        return PCIBIOS_SET_FAILED;

    if (advk_pcie_pio_is_running(pcie))
        return PCIBIOS_SET_FAILED;

    /* Program the control register */
    reg = advk_readl(pcie, PIO_CTRL);
    reg &= ~PIO_CTRL_TYPE_MASK;
    if (pci_is_root_bus(bus->parent))
        reg |= PCIE_CONFIG_WR_TYPE0;
    else
        reg |= PCIE_CONFIG_WR_TYPE1;
    advk_writel(pcie, reg, PIO_CTRL);

    /* Program the address registers */
    reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
    advk_writel(pcie, reg, PIO_ADDR_LS);
    advk_writel(pcie, 0, PIO_ADDR_MS);

    /* Calculate the write strobe */
    offset      = where & 0x3;
    reg         = val << (8 * offset);
    data_strobe = GENMASK(size - 1, 0) << offset;
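    /*
     * For example, a 2-byte write at where == 0x2 gives offset == 2 and
     * data_strobe == GENMASK(1, 0) << 2 == 0xc, enabling byte lanes 2-3
     * of the dword at the aligned address.
     */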

    /* Program the data register */
    advk_writel(pcie, reg, PIO_WR_DATA);

    /* Program the data strobe */
    advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

    retry_count = 0;
    do {
        /* Clear PIO DONE ISR and start the transfer */
        advk_writel(pcie, 1, PIO_ISR);
        advk_writel(pcie, 1, PIO_START);

        ret = advk_pcie_wait_pio(pcie);
        if (ret < 0)
            return PCIBIOS_SET_FAILED;

        retry_count += ret;

        ret = advk_pcie_check_pio_status(pcie, false, NULL);
    } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

    return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}

static struct pci_ops advk_pcie_ops = {
    .read = advk_pcie_rd_conf,
    .write = advk_pcie_wr_conf,
};

static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
                     struct msi_msg *msg)
{
    struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
    phys_addr_t msi_addr = virt_to_phys(pcie);

    msg->address_lo = lower_32_bits(msi_addr);
    msg->address_hi = upper_32_bits(msi_addr);
    msg->data = data->hwirq;
}
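
/*
 * The composed message targets the same doorbell address that
 * advk_pcie_setup_hw() programmed into PCIE_MSI_ADDR_{LOW,HIGH}_REG, and
 * the payload is simply the hwirq number (0..MSI_IRQ_NUM - 1).
 */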

static int advk_msi_set_affinity(struct irq_data *irq_data,
                 const struct cpumask *mask, bool force)
{
    return -EINVAL;
}

static void advk_msi_irq_mask(struct irq_data *d)
{
    struct advk_pcie *pcie = d->domain->host_data;
    irq_hw_number_t hwirq = irqd_to_hwirq(d);
    unsigned long flags;
    u32 mask;

    raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
    mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
    mask |= BIT(hwirq);
    advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
    raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_irq_unmask(struct irq_data *d)
{
    struct advk_pcie *pcie = d->domain->host_data;
    irq_hw_number_t hwirq = irqd_to_hwirq(d);
    unsigned long flags;
    u32 mask;

    raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
    mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
    mask &= ~BIT(hwirq);
    advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
    raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}

static void advk_msi_top_irq_mask(struct irq_data *d)
{
    pci_msi_mask_irq(d);
    irq_chip_mask_parent(d);
}

static void advk_msi_top_irq_unmask(struct irq_data *d)
{
    pci_msi_unmask_irq(d);
    irq_chip_unmask_parent(d);
}

static struct irq_chip advk_msi_bottom_irq_chip = {
    .name           = "MSI",
    .irq_compose_msi_msg    = advk_msi_irq_compose_msi_msg,
    .irq_set_affinity   = advk_msi_set_affinity,
    .irq_mask       = advk_msi_irq_mask,
    .irq_unmask     = advk_msi_irq_unmask,
};

static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
                     unsigned int virq,
                     unsigned int nr_irqs, void *args)
{
    struct advk_pcie *pcie = domain->host_data;
    int hwirq, i;

    mutex_lock(&pcie->msi_used_lock);
    hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
                    order_base_2(nr_irqs));
    mutex_unlock(&pcie->msi_used_lock);
    if (hwirq < 0)
        return -ENOSPC;

    for (i = 0; i < nr_irqs; i++)
        irq_domain_set_info(domain, virq + i, hwirq + i,
                    &advk_msi_bottom_irq_chip,
                    domain->host_data, handle_simple_irq,
                    NULL, NULL);

    return 0;
}
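
/*
 * bitmap_find_free_region() with order_base_2(nr_irqs) hands out a
 * naturally aligned power-of-two block of hwirqs, which is what PCI
 * Multi-MSI requires: the device encodes the vector index in the low
 * bits of the message data, so the block must be contiguous and aligned.
 */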
1380 
1381 static void advk_msi_irq_domain_free(struct irq_domain *domain,
1382                      unsigned int virq, unsigned int nr_irqs)
1383 {
1384     struct irq_data *d = irq_domain_get_irq_data(domain, virq);
1385     struct advk_pcie *pcie = domain->host_data;
1386 
1387     mutex_lock(&pcie->msi_used_lock);
1388     bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
1389     mutex_unlock(&pcie->msi_used_lock);
1390 }
1391 
1392 static const struct irq_domain_ops advk_msi_domain_ops = {
1393     .alloc = advk_msi_irq_domain_alloc,
1394     .free = advk_msi_irq_domain_free,
1395 };
1396 
1397 static void advk_pcie_irq_mask(struct irq_data *d)
1398 {
1399     struct advk_pcie *pcie = d->domain->host_data;
1400     irq_hw_number_t hwirq = irqd_to_hwirq(d);
1401     unsigned long flags;
1402     u32 mask;
1403 
1404     raw_spin_lock_irqsave(&pcie->irq_lock, flags);
1405     mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1406     mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
1407     advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1408     raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
1409 }
1410 
1411 static void advk_pcie_irq_unmask(struct irq_data *d)
1412 {
1413     struct advk_pcie *pcie = d->domain->host_data;
1414     irq_hw_number_t hwirq = irqd_to_hwirq(d);
1415     unsigned long flags;
1416     u32 mask;
1417 
1418     raw_spin_lock_irqsave(&pcie->irq_lock, flags);
1419     mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1420     mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
1421     advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1422     raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
1423 }
1424 
1425 static int advk_pcie_irq_map(struct irq_domain *h,
1426                  unsigned int virq, irq_hw_number_t hwirq)
1427 {
1428     struct advk_pcie *pcie = h->host_data;
1429 
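    /*
     * Editor's note: PCI INTx lines are level-triggered, hence the
     * IRQ_LEVEL status flag and the level-type flow handler here.
     */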
1430     irq_set_status_flags(virq, IRQ_LEVEL);
1431     irq_set_chip_and_handler(virq, &pcie->irq_chip,
1432                  handle_level_irq);
1433     irq_set_chip_data(virq, pcie);
1434 
1435     return 0;
1436 }
1437 
1438 static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
1439     .map = advk_pcie_irq_map,
1440     .xlate = irq_domain_xlate_onecell,
1441 };
1442 
1443 static struct irq_chip advk_msi_irq_chip = {
1444     .name       = "advk-MSI",
1445     .irq_mask   = advk_msi_top_irq_mask,
1446     .irq_unmask = advk_msi_top_irq_unmask,
1447 };
1448 
1449 static struct msi_domain_info advk_msi_domain_info = {
1450     .flags  = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
1451           MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
1452     .chip   = &advk_msi_irq_chip,
1453 };
1454 
1455 static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
1456 {
1457     struct device *dev = &pcie->pdev->dev;
1458 
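    /*
     * Editor's note: two stacked IRQ domains are created here. The inner
     * linear domain hands out hardware MSI numbers from the msi_used
     * bitmap; the PCI MSI domain layered on top is what endpoint drivers
     * reach through pci_alloc_irq_vectors().
     */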
1459     raw_spin_lock_init(&pcie->msi_irq_lock);
1460     mutex_init(&pcie->msi_used_lock);
1461 
1462     pcie->msi_inner_domain =
1463         irq_domain_add_linear(NULL, MSI_IRQ_NUM,
1464                       &advk_msi_domain_ops, pcie);
1465     if (!pcie->msi_inner_domain)
1466         return -ENOMEM;
1467 
1468     pcie->msi_domain =
1469         pci_msi_create_irq_domain(dev_fwnode(dev),
1470                       &advk_msi_domain_info,
1471                       pcie->msi_inner_domain);
1472     if (!pcie->msi_domain) {
1473         irq_domain_remove(pcie->msi_inner_domain);
1474         return -ENOMEM;
1475     }
1476 
1477     return 0;
1478 }
1479 
1480 static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
1481 {
1482     irq_domain_remove(pcie->msi_domain);
1483     irq_domain_remove(pcie->msi_inner_domain);
1484 }
1485 
1486 static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
1487 {
1488     struct device *dev = &pcie->pdev->dev;
1489     struct device_node *node = dev->of_node;
1490     struct device_node *pcie_intc_node;
1491     struct irq_chip *irq_chip;
1492     int ret = 0;
1493 
1494     raw_spin_lock_init(&pcie->irq_lock);
1495 
1496     pcie_intc_node = of_get_next_child(node, NULL);
1497     if (!pcie_intc_node) {
1498         dev_err(dev, "No PCIe interrupt controller node found\n");
1499         return -ENODEV;
1500     }
1501 
1502     irq_chip = &pcie->irq_chip;
1503 
1504     irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
1505                     dev_name(dev));
1506     if (!irq_chip->name) {
1507         ret = -ENOMEM;
1508         goto out_put_node;
1509     }
1510 
1511     irq_chip->irq_mask = advk_pcie_irq_mask;
1512     irq_chip->irq_unmask = advk_pcie_irq_unmask;
1513 
1514     pcie->irq_domain =
1515         irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
1516                       &advk_pcie_irq_domain_ops, pcie);
1517     if (!pcie->irq_domain) {
1518         dev_err(dev, "Failed to get an INTx IRQ domain\n");
1519         ret = -ENOMEM;
1520         goto out_put_node;
1521     }
1522 
1523 out_put_node:
1524     of_node_put(pcie_intc_node);
1525     return ret;
1526 }
1527 
1528 static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
1529 {
1530     irq_domain_remove(pcie->irq_domain);
1531 }
1532 
1533 static struct irq_chip advk_rp_irq_chip = {
1534     .name = "advk-RP",
1535 };
1536 
1537 static int advk_pcie_rp_irq_map(struct irq_domain *h,
1538                 unsigned int virq, irq_hw_number_t hwirq)
1539 {
1540     struct advk_pcie *pcie = h->host_data;
1541 
1542     irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
1543     irq_set_chip_data(virq, pcie);
1544 
1545     return 0;
1546 }
1547 
1548 static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
1549     .map = advk_pcie_rp_irq_map,
1550     .xlate = irq_domain_xlate_onecell,
1551 };
1552 
1553 static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
1554 {
1555     pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
1556                             &advk_pcie_rp_irq_domain_ops,
1557                             pcie);
1558     if (!pcie->rp_irq_domain) {
1559         dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
1560         return -ENOMEM;
1561     }
1562 
1563     return 0;
1564 }
1565 
1566 static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
1567 {
1568     irq_domain_remove(pcie->rp_irq_domain);
1569 }
1570 
1571 static void advk_pcie_handle_pme(struct advk_pcie *pcie)
1572 {
1573     u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;
1574 
1575     advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);
1576 
1577     /*
1578      * PCIE_MSG_LOG_REG contains only the last inbound message, so store
1579      * the requester ID only if PME is not already asserted, and likewise
1580      * do not trigger the PME interrupt again while PME is still asserted.
1581      */
1582     if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
1583         pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);
1584 
1585         /*
1586          * Trigger PME interrupt only if PMEIE bit in Root Control is set.
1587          * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
1588          */
1589         if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
1590             return;
1591 
1592         if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
1593             dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
1594     }
1595 }
1596 
1597 static void advk_pcie_handle_msi(struct advk_pcie *pcie)
1598 {
1599     u32 msi_val, msi_mask, msi_status, msi_idx;
1600 
1601     msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
1602     msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
1603     msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
1604 
1605     for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
1606         if (!(BIT(msi_idx) & msi_status))
1607             continue;
1608 
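        /*
         * Editor's note: the status register is assumed to be
         * write-one-to-clear; acking the vector before dispatching it
         * avoids losing an MSI that fires again while being handled.
         */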
1609         advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
1610         if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
1611             dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
1612     }
1613 
1614     advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
1615             PCIE_ISR0_REG);
1616 }
1617 
1618 static void advk_pcie_handle_int(struct advk_pcie *pcie)
1619 {
1620     u32 isr0_val, isr0_mask, isr0_status;
1621     u32 isr1_val, isr1_mask, isr1_status;
1622     int i;
1623 
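    /*
     * Editor's note: a set bit in a MASK register disables the source
     * (see the mask/unmask helpers above), so only pending, unmasked
     * bits that are defined in the *_ALL_MASK sets are serviced below.
     */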
1624     isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
1625     isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1626     isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
1627 
1628     isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
1629     isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1630     isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
1631 
1632     /* Process the PME interrupt first so that the PME requester ID is not missed */
1633     if (isr0_status & PCIE_MSG_PM_PME_MASK)
1634         advk_pcie_handle_pme(pcie);
1635 
1636     /* Process ERR interrupt */
1637     if (isr0_status & PCIE_ISR0_ERR_MASK) {
1638         advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);
1639 
1640         /*
1641          * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
1642          * PCIe interrupt 0
1643          */
1644         if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
1645             dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
1646     }
1647 
1648     /* Process MSI interrupts */
1649     if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
1650         advk_pcie_handle_msi(pcie);
1651 
1652     /* Process legacy interrupts */
1653     for (i = 0; i < PCI_NUM_INTX; i++) {
1654         if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
1655             continue;
1656 
1657         advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
1658                 PCIE_ISR1_REG);
1659 
1660         if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
1661             dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
1662                         (char)i + 'A');
1663     }
1664 }
1665 
1666 static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
1667 {
1668     struct advk_pcie *pcie = arg;
1669     u32 status;
1670 
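    /*
     * Editor's note: the line is requested with IRQF_SHARED in probe, so
     * IRQ_NONE must be returned when the summary status bit shows that
     * this interrupt was raised by another device on the shared line.
     */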
1671     status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
1672     if (!(status & PCIE_IRQ_CORE_INT))
1673         return IRQ_NONE;
1674 
1675     advk_pcie_handle_int(pcie);
1676 
1677     /* Clear interrupt */
1678     advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
1679 
1680     return IRQ_HANDLED;
1681 }
1682 
1683 static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1684 {
1685     struct advk_pcie *pcie = dev->bus->sysdata;
1686 
1687     /*
1688      * The emulated root bridge has its own emulated IRQ chip and domain.
1689      * The pin argument is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD),
1690      * while the hwirq for irq_create_mapping() is indexed from zero.
1691      */
1692     if (pci_is_root_bus(dev->bus))
1693         return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
1694     else
1695         return of_irq_parse_and_map_pci(dev, slot, pin);
1696 }
1697 
1698 static void advk_pcie_disable_phy(struct advk_pcie *pcie)
1699 {
1700     phy_power_off(pcie->phy);
1701     phy_exit(pcie->phy);
1702 }
1703 
1704 static int advk_pcie_enable_phy(struct advk_pcie *pcie)
1705 {
1706     int ret;
1707 
1708     if (!pcie->phy)
1709         return 0;
1710 
1711     ret = phy_init(pcie->phy);
1712     if (ret)
1713         return ret;
1714 
1715     ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
1716     if (ret) {
1717         phy_exit(pcie->phy);
1718         return ret;
1719     }
1720 
1721     ret = phy_power_on(pcie->phy);
1722     if (ret) {
1723         phy_exit(pcie->phy);
1724         return ret;
1725     }
1726 
1727     return 0;
1728 }
1729 
1730 static int advk_pcie_setup_phy(struct advk_pcie *pcie)
1731 {
1732     struct device *dev = &pcie->pdev->dev;
1733     struct device_node *node = dev->of_node;
1734     int ret = 0;
1735 
1736     pcie->phy = devm_of_phy_get(dev, node, NULL);
1737     if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
1738         return PTR_ERR(pcie->phy);
1739 
1740     /* Old DT bindings lack the PHY handle */
1741     if (IS_ERR(pcie->phy)) {
1742         dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
1743         pcie->phy = NULL;
1744         return 0;
1745     }
1746 
1747     ret = advk_pcie_enable_phy(pcie);
1748     if (ret)
1749         dev_err(dev, "Failed to initialize PHY (%d)\n", ret);
1750 
1751     return ret;
1752 }
1753 
1754 static int advk_pcie_probe(struct platform_device *pdev)
1755 {
1756     struct device *dev = &pdev->dev;
1757     struct advk_pcie *pcie;
1758     struct pci_host_bridge *bridge;
1759     struct resource_entry *entry;
1760     int ret, irq;
1761 
1762     bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
1763     if (!bridge)
1764         return -ENOMEM;
1765 
1766     pcie = pci_host_bridge_priv(bridge);
1767     pcie->pdev = pdev;
1768     platform_set_drvdata(pdev, pcie);
1769 
1770     resource_list_for_each_entry(entry, &bridge->windows) {
1771         resource_size_t start = entry->res->start;
1772         resource_size_t size = resource_size(entry->res);
1773         unsigned long type = resource_type(entry->res);
1774         u64 win_size;
1775 
1776         /*
1777          * Aardvark hardware also allows configuring a PCIe window for
1778          * config type 0 and type 1 mappings, but this driver issues
1779          * configuration transfers only via PIO, which does not use the
1780          * PCIe window configuration.
1781          */
1782         if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
1783             continue;
1784 
1785         /*
1786          * Skip transparent memory resources. The default outbound access
1787          * configuration is transparent memory access, so such resources
1788          * need no window configuration.
1789          */
1790         if (type == IORESOURCE_MEM && entry->offset == 0)
1791             continue;
1792 
1793         /*
1794          * The n-th PCIe window is configured by the tuple (match, remap,
1795          * mask): an access to address A uses window n when (A & mask)
1796          * equals match. Hence every window size must be a power of two
1797          * and every start address must be aligned to the window size.
1798          * The minimal size is 64 KiB because the lower 16 bits of the
1799          * mask must be zero, and the remapped address may only have bits
1800          * set that are also set in the mask.
1801          */
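        /*
         * Editor's note: a worked example of the splitting done below,
         * under these rules. For a 192 KiB region at 0x100000, the first
         * pass picks win_size as the smaller of the largest power of two
         * not exceeding the size (128 KiB) and the alignment of the start
         * address (1 MiB), i.e. 128 KiB; the remaining 64 KiB at 0x120000
         * is covered exactly by a second window. A region that cannot be
         * decomposed this way within OB_WIN_COUNT windows, or whose remap
         * base breaks the alignment rule, is rejected as invalid below.
         */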
1802         while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
1803             /* Calculate the largest aligned window size */
1804             win_size = (1ULL << (fls64(size)-1)) |
1805                    (start ? (1ULL << __ffs64(start)) : 0);
1806             win_size = 1ULL << __ffs64(win_size);
1807             if (win_size < 0x10000)
1808                 break;
1809 
1810             dev_dbg(dev,
1811                 "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
1812                 pcie->wins_count, (unsigned long long)start,
1813                 (unsigned long long)start + win_size, type);
1814 
1815             if (type == IORESOURCE_IO) {
1816                 pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
1817                 pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
1818             } else {
1819                 pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
1820                 pcie->wins[pcie->wins_count].match = start;
1821             }
1822             pcie->wins[pcie->wins_count].remap = start - entry->offset;
1823             pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
1824 
1825             if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
1826                 break;
1827 
1828             start += win_size;
1829             size -= win_size;
1830             pcie->wins_count++;
1831         }
1832 
1833         if (size > 0) {
1834             dev_err(&pcie->pdev->dev,
1835                 "Invalid PCIe region [0x%llx-0x%llx]\n",
1836                 (unsigned long long)entry->res->start,
1837                 (unsigned long long)entry->res->end + 1);
1838             return -EINVAL;
1839         }
1840     }
1841 
1842     pcie->base = devm_platform_ioremap_resource(pdev, 0);
1843     if (IS_ERR(pcie->base))
1844         return PTR_ERR(pcie->base);
1845 
1846     irq = platform_get_irq(pdev, 0);
1847     if (irq < 0)
1848         return irq;
1849 
1850     ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
1851                    IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
1852                    pcie);
1853     if (ret) {
1854         dev_err(dev, "Failed to register interrupt\n");
1855         return ret;
1856     }
1857 
1858     pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
1859                                "reset-gpios", 0,
1860                                GPIOD_OUT_LOW,
1861                                "pcie1-reset");
1862     ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
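    /*
     * Editor's note: -ENOENT here means the optional reset-gpios DT
     * property is simply absent, in which case the driver continues
     * without driving PERST# itself.
     */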
1863     if (ret) {
1864         if (ret == -ENOENT) {
1865             pcie->reset_gpio = NULL;
1866         } else {
1867             if (ret != -EPROBE_DEFER)
1868                 dev_err(dev, "Failed to get reset-gpio: %i\n",
1869                     ret);
1870             return ret;
1871         }
1872     }
1873 
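    /*
     * Editor's note: any value outside 1..3, including the negative
     * error codes returned for a missing or malformed max-link-speed DT
     * property, falls back to the controller maximum of gen 3.
     */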
1874     ret = of_pci_get_max_link_speed(dev->of_node);
1875     if (ret <= 0 || ret > 3)
1876         pcie->link_gen = 3;
1877     else
1878         pcie->link_gen = ret;
1879 
1880     ret = advk_pcie_setup_phy(pcie);
1881     if (ret)
1882         return ret;
1883 
1884     advk_pcie_setup_hw(pcie);
1885 
1886     ret = advk_sw_pci_bridge_init(pcie);
1887     if (ret) {
1888         dev_err(dev, "Failed to register emulated root PCI bridge\n");
1889         return ret;
1890     }
1891 
1892     ret = advk_pcie_init_irq_domain(pcie);
1893     if (ret) {
1894         dev_err(dev, "Failed to initialize INTx IRQ domain\n");
1895         return ret;
1896     }
1897 
1898     ret = advk_pcie_init_msi_irq_domain(pcie);
1899     if (ret) {
1900         dev_err(dev, "Failed to initialize MSI IRQ domain\n");
1901         advk_pcie_remove_irq_domain(pcie);
1902         return ret;
1903     }
1904 
1905     ret = advk_pcie_init_rp_irq_domain(pcie);
1906     if (ret) {
1907         dev_err(dev, "Failed to initialize Root Port IRQ domain\n");
1908         advk_pcie_remove_msi_irq_domain(pcie);
1909         advk_pcie_remove_irq_domain(pcie);
1910         return ret;
1911     }
1912 
1913     bridge->sysdata = pcie;
1914     bridge->ops = &advk_pcie_ops;
1915     bridge->map_irq = advk_pcie_map_irq;
1916 
1917     ret = pci_host_probe(bridge);
1918     if (ret < 0) {
1919         advk_pcie_remove_rp_irq_domain(pcie);
1920         advk_pcie_remove_msi_irq_domain(pcie);
1921         advk_pcie_remove_irq_domain(pcie);
1922         return ret;
1923     }
1924 
1925     return 0;
1926 }
1927 
1928 static int advk_pcie_remove(struct platform_device *pdev)
1929 {
1930     struct advk_pcie *pcie = platform_get_drvdata(pdev);
1931     struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1932     u32 val;
1933     int i;
1934 
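    /*
     * Editor's note: teardown runs roughly in reverse order of probe.
     * The root bus is removed first so that no device driver can touch
     * the hardware while the interrupt sources, domains and the link
     * below are being dismantled.
     */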
1935     /* Remove PCI bus with all devices */
1936     pci_lock_rescan_remove();
1937     pci_stop_root_bus(bridge->bus);
1938     pci_remove_root_bus(bridge->bus);
1939     pci_unlock_rescan_remove();
1940 
1941     /* Disable Root Bridge I/O space, memory space and bus mastering */
1942     val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1943     val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1944     advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);
1945 
1946     /* Disable MSI */
1947     val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1948     val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
1949     advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);
1950 
1951     /* Clear MSI address */
1952     advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
1953     advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);
1954 
1955     /* Mask all interrupts */
1956     advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
1957     advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
1958     advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
1959     advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);
1960 
1961     /* Clear all interrupts */
1962     advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
1963     advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
1964     advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
1965     advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
1966 
1967     /* Remove IRQ domains */
1968     advk_pcie_remove_rp_irq_domain(pcie);
1969     advk_pcie_remove_msi_irq_domain(pcie);
1970     advk_pcie_remove_irq_domain(pcie);
1971 
1972     /* Free config space for emulated root bridge */
1973     pci_bridge_emul_cleanup(&pcie->bridge);
1974 
1975     /* Assert the PERST# signal, which prepares the PCIe card for power down */
1976     if (pcie->reset_gpio)
1977         gpiod_set_value_cansleep(pcie->reset_gpio, 1);
1978 
1979     /* Disable link training */
1980     val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1981     val &= ~LINK_TRAINING_EN;
1982     advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);
1983 
1984     /* Disable outbound address window mapping */
1985     for (i = 0; i < OB_WIN_COUNT; i++)
1986         advk_pcie_disable_ob_win(pcie, i);
1987 
1988     /* Disable the PHY */
1989     advk_pcie_disable_phy(pcie);
1990 
1991     return 0;
1992 }
1993 
1994 static const struct of_device_id advk_pcie_of_match_table[] = {
1995     { .compatible = "marvell,armada-3700-pcie", },
1996     {},
1997 };
1998 MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);
1999 
2000 static struct platform_driver advk_pcie_driver = {
2001     .driver = {
2002         .name = "advk-pcie",
2003         .of_match_table = advk_pcie_of_match_table,
2004     },
2005     .probe = advk_pcie_probe,
2006     .remove = advk_pcie_remove,
2007 };
2008 module_platform_driver(advk_pcie_driver);
2009 
2010 MODULE_DESCRIPTION("Aardvark PCIe controller");
2011 MODULE_LICENSE("GPL v2");