0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/clk.h>
0010 #include <linux/delay.h>
0011 #include <linux/iopoll.h>
0012 #include <linux/irq.h>
0013 #include <linux/irqchip/chained_irq.h>
0014 #include <linux/irqdomain.h>
0015 #include <linux/kernel.h>
0016 #include <linux/module.h>
0017 #include <linux/msi.h>
0018 #include <linux/pci.h>
0019 #include <linux/phy/phy.h>
0020 #include <linux/platform_device.h>
0021 #include <linux/pm_domain.h>
0022 #include <linux/pm_runtime.h>
0023 #include <linux/reset.h>
0024
0025 #include "../pci.h"
0026
/*
 * MAC-layer register map (offsets from pcie->base) and field definitions.
 * Fix: macro parameters in PCI_CLASS() and PCIE_LTSSM_STATE() are now
 * parenthesized so expression arguments expand safely.
 */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

/* The controller exposes 8 MSI sets of 32 vectors each */
#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

/* Per-set MSI registers: doorbell base plus status/enable offsets */
#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

/* Address translation (ATR) window registers */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
0102
0103
0104
0105
0106
0107
0108
/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base of this set's MSI registers
 * @msg_addr: MSI message address (physical address of the set's doorbell)
 * @saved_irq_state: per-set MSI enable bits saved across suspend/resume
 */
struct mtk_msi_set {
	void __iomem *base;
	phys_addr_t msg_addr;
	u32 saved_irq_state;
};
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base (used to build MSI doorbell addresses)
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: PCIE_INT_ENABLE_REG contents saved across suspend/resume
 * @irq_lock: lock protecting IRQ enable register read-modify-write cycles
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain (PCI MSI layer)
 * @msi_bottom_domain: MSI IRQ bottom (hardware) domain
 * @msi_sets: MSI sets information
 * @lock: mutex protecting the MSI vector bitmap
 * @msi_irq_in_use: bitmap of assigned MSI hardware IRQ numbers
 */
struct mtk_gen3_pcie {
	struct device *dev;
	void __iomem *base;
	phys_addr_t reg_base;
	struct reset_control *mac_reset;
	struct reset_control *phy_reset;
	struct phy *phy;
	struct clk_bulk_data *clks;
	int num_clks;

	int irq;
	u32 saved_irq_state;
	raw_spinlock_t irq_lock;
	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_bottom_domain;
	struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};
0155
0156
/* LTSSM state names, indexed by the value decoded via PCIE_LTSSM_STATE() */
static const char *const ltssm_str[] = {
	"detect.quiet",			/* 0x00 */
	"detect.active",		/* 0x01 */
	"polling.active",		/* 0x02 */
	"polling.compliance",		/* 0x03 */
	"polling.configuration",	/* 0x04 */
	"config.linkwidthstart",	/* 0x05 */
	"config.linkwidthaccept",	/* 0x06 */
	"config.lanenumwait",		/* 0x07 */
	"config.lanenumaccept",		/* 0x08 */
	"config.complete",		/* 0x09 */
	"config.idle",			/* 0x0A */
	"recovery.receiverlock",	/* 0x0B */
	"recovery.equalization",	/* 0x0C */
	"recovery.speed",		/* 0x0D */
	"recovery.receiverconfig",	/* 0x0E */
	"recovery.idle",		/* 0x0F */
	"L0",				/* 0x10 */
	"L0s",				/* 0x11 */
	"L1.entry",			/* 0x12 */
	"L1.idle",			/* 0x13 */
	"L2.idle",			/* 0x14 */
	"L2.transmitwake",		/* 0x15 */
	"disable",			/* 0x16 */
	"loopback.entry",		/* 0x17 */
	"loopback.active",		/* 0x18 */
	"loopback.exit",		/* 0x19 */
	"hotreset",			/* 0x1A */
};
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
/**
 * mtk_pcie_config_tlp_header() - Set CFG request header before a config access
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size of the access
 *
 * Program PCIE_CFGNUM_REG with the target bus/devfn and the byte enables
 * derived from @where/@size so the subsequent access through the config
 * window forms a correctly framed CFG TLP.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
				       int where, int size)
{
	struct mtk_gen3_pcie *pcie = bus->sysdata;
	int bytes;
	u32 val;

	/* @size contiguous byte-enable bits, shifted to the byte lane of @where */
	bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
	      PCIE_CFG_HEADER(bus->number, devfn);

	writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
0210
0211 static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
0212 int where)
0213 {
0214 struct mtk_gen3_pcie *pcie = bus->sysdata;
0215
0216 return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
0217 }
0218
/* Frame the CFG TLP header, then perform a windowed 32-bit config read. */
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	return pci_generic_config_read32(bus, devfn, where, size, val);
}
0226
/*
 * Config write. The hardware only accepts dword-sized writes through the
 * window; the byte enables programmed into the TLP header mask off the
 * untouched lanes, so a sub-word value is shifted to its byte position
 * and issued as a full 32-bit write.
 */
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	mtk_pcie_config_tlp_header(bus, devfn, where, size);

	if (size <= 2)
		val <<= (where & 0x3) * 8;

	return pci_generic_config_write32(bus, devfn, where, 4, val);
}
0237
/* Config-space accessors hooked into the generic PCI core */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
0243
/**
 * mtk_pcie_set_trans_table() - Program one outbound address translation window
 * @pcie: PCIe port information
 * @cpu_addr: CPU-side start address of the window
 * @pci_addr: PCI-side address the window translates to
 * @size: window size
 * @type: resource type (IORESOURCE_IO selects IO TLPs, otherwise MEM)
 * @num: index of the translation table entry to program
 *
 * Return: 0 on success, -ENODEV when all PCIE_MAX_TRANS_TABLES entries
 * are already in use.
 *
 * NOTE(review): PCIE_ATR_SIZE(fls(size) - 1) encodes the window size as a
 * power of two; a non-power-of-two @size appears to be rounded down here —
 * confirm callers only pass power-of-two-sized windows.
 */
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int num)
{
	void __iomem *table;
	u32 val;

	if (num >= PCIE_MAX_TRANS_TABLES) {
		dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
			(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
		return -ENODEV;
	}

	table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
		num * PCIE_ATR_TLB_SET_OFFSET;

	/* Source address low dword doubles as the size/enable field */
	writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
		       table);
	writel_relaxed(upper_32_bits(cpu_addr),
		       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
	writel_relaxed(lower_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
	writel_relaxed(upper_32_bits(pci_addr),
		       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

	if (type == IORESOURCE_IO)
		val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
	else
		val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

	writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

	return 0;
}
0280
/**
 * mtk_pcie_enable_msi() - Initialize and enable all hardware MSI sets
 * @pcie: PCIe port information
 *
 * For each set, record its register base and doorbell (message) address,
 * program that address into the hardware, then enable every set and the
 * top-level per-set MSI interrupt sources.
 */
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		/* Doorbell is the physical address of the set's own registers */
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address, low and high halves */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	/* Enable all MSI sets */
	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	/* Enable the per-set MSI sources at the top-level interrupt register */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
0309
/**
 * mtk_pcie_startup_port() - Bring up the PCIe link and configure the port
 * @pcie: PCIe port information
 *
 * Switch the MAC into Root Complex mode, fix the class code, mask INTx,
 * cycle the resets, wait for link training to complete, enable MSI, and
 * program one outbound translation window per bridge resource.
 *
 * Return: 0 on success, a negative errno if the link never came up or
 * translation windows ran out.
 */
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code to PCI-to-PCI bridge */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts until the IRQ domains are ready */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage requests */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals, including PERST# to the endpoint */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Hold the resets for 100ms before releasing them.
	 * NOTE(review): PCIe CEM requires PERST# to stay asserted for at
	 * least 100ms (T_PVPERL) after power and clocks are stable —
	 * presumably the rationale for this value; confirm against the
	 * controller documentation.
	 */
	msleep(100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Wait for the link to come up */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		/* Report the LTSSM state to aid link-down debugging */
		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n",
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows for each bridge resource */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;
		const char *range_type;

		if (type == IORESOURCE_IO) {
			cpu_addr = pci_pio_to_address(res->start);
			range_type = "IO";
		} else if (type == IORESOURCE_MEM) {
			cpu_addr = res->start;
			range_type = "MEM";
		} else {
			continue;
		}

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, table_index);
		if (err)
			return err;

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
			range_type, table_index, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)size);

		table_index++;
	}

	return 0;
}
0411
/*
 * Affinity cannot be set per-vector: all INTx and MSI interrupts are
 * funneled through the port's single chained interrupt line.
 */
static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
0417
/* Mask at the PCI device (MSI capability), then at the parent (bottom) chip. */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}
0423
/* Unmask at the PCI device (MSI capability), then at the parent (bottom) chip. */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}
0429
/* Top-level (PCI MSI layer) irq_chip; ack is delegated to the bottom domain */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI",
};
0436
/* MSI domain capabilities: MSI-X and multi-message MSI are both supported */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
};
0442
/*
 * Compose the MSI message: the endpoint writes the vector index (within
 * its set) to the set's doorbell address recorded in msi_set->msg_addr.
 */
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq;

	/* Vector index within this 32-vector set */
	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	msg->address_hi = upper_32_bits(msi_set->msg_addr);
	msg->address_lo = lower_32_bits(msi_set->msg_addr);
	msg->data = hwirq;
	dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
		hwirq, msg->address_hi, msg->address_lo, msg->data);
}
0457
/* Ack one MSI vector by clearing its bit in the set's status register. */
static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	/* Status register is write-1-to-clear (see mtk_pcie_msi_handler) */
	writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}
0467
/* Mask one MSI vector in its set's enable register, under irq_lock. */
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	/* irq_lock serializes the read-modify-write of the enable register */
	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
0483
/* Unmask one MSI vector in its set's enable register, under irq_lock. */
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

	/* irq_lock serializes the read-modify-write of the enable register */
	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
0499
/* Bottom (hardware) MSI irq_chip operating directly on the MSI set registers */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "MSI",
};
0508
/**
 * mtk_msi_bottom_domain_alloc() - Allocate hardware MSI vectors
 * @domain: bottom MSI IRQ domain
 * @virq: first virtual IRQ number to set up
 * @nr_irqs: number of contiguous vectors requested (power of two)
 * @arg: unused allocation argument
 *
 * Return: 0 on success, -ENOSPC when no free region is left.
 */
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);

	/* Grab an order-aligned block of vectors from the global bitmap */
	hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	/*
	 * An order-aligned region never straddles a 32-vector set boundary,
	 * so every vector in the block belongs to the same MSI set.
	 */
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mtk_msi_bottom_irq_chip, msi_set,
				    handle_edge_irq, NULL, NULL);

	return 0;
}
0537
/* Release a block of MSI vectors back to the bitmap and free the IRQ data. */
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	/* Must mirror the order used at allocation time */
	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
0553
/* Allocation callbacks for the bottom (hardware) MSI domain */
static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
0558
/* Mask one INTx line in the shared interrupt enable register, under irq_lock. */
static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	/* INTx bits live at PCIE_INTX_SHIFT within the enable register */
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
0571
/* Unmask one INTx line in the shared interrupt enable register, under irq_lock. */
static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	/* INTx bits live at PCIE_INTX_SHIFT within the enable register */
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
0584
0585
0586
0587
0588
0589
0590
0591
0592
/*
 * End-of-interrupt for INTx: clear the pending bit in the status register
 * after the handler has run (fasteoi flow), so a still-asserted level
 * interrupt re-latches and is not lost.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long hwirq;

	hwirq = data->hwirq + PCIE_INTX_SHIFT;
	writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}
0601
/* irq_chip for the four legacy INTx lines */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "INTx",
};
0609
/* Map one INTx hwirq: attach chip data and the fasteoi flow handler. */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, domain->host_data);
	irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
				      handle_fasteoi_irq, "INTx");
	return 0;
}
0618
/* Mapping callbacks for the INTx domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
0622
/**
 * mtk_pcie_init_irq_domains() - Create the INTx and MSI IRQ domains
 * @pcie: PCIe port information
 *
 * Creates, in order: the INTx domain (from the "interrupt-controller" DT
 * child node), the bottom MSI domain, and the PCI MSI domain stacked on
 * top of it. On failure, domains created so far are removed in reverse.
 *
 * Return: 0 on success or a negative errno.
 */
static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* The INTx domain hangs off a dedicated DT child node */
	intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n");
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n");
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI: bottom (hardware) domain plus the PCI MSI layer on top */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
				  &mtk_msi_bottom_domain_ops, pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n");
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
						     &mtk_msi_domain_info,
						     pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		ret = -ENODEV;
		goto err_msi_domain;
	}

	/* The node reference is dropped on both success and error paths */
	of_node_put(intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(pcie->intx_domain);
out_put_node:
	of_node_put(intc_node);
	return ret;
}
0677
/* Detach the chained handler, remove all IRQ domains, dispose the mapping. */
static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(pcie->msi_bottom_domain);

	irq_dispose_mapping(pcie->irq);
}
0693
0694 static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
0695 {
0696 struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
0697 unsigned long msi_enable, msi_status;
0698 irq_hw_number_t bit, hwirq;
0699
0700 msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
0701
0702 do {
0703 msi_status = readl_relaxed(msi_set->base +
0704 PCIE_MSI_SET_STATUS_OFFSET);
0705 msi_status &= msi_enable;
0706 if (!msi_status)
0707 break;
0708
0709 for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
0710 hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
0711 generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
0712 }
0713 } while (true);
0714 }
0715
/*
 * Chained handler for the port's single interrupt line: demultiplex the
 * top-level status register into INTx events and per-set MSI events.
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	/* INTx: status cleared later by mtk_intx_eoi() in the fasteoi flow */
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(pcie->intx_domain,
					  irq_bit - PCIE_INTX_SHIFT);

	/* MSI: handle each pending set, then clear the set's summary bit */
	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(irqchip, desc);
}
0741
0742 static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
0743 {
0744 struct device *dev = pcie->dev;
0745 struct platform_device *pdev = to_platform_device(dev);
0746 int err;
0747
0748 err = mtk_pcie_init_irq_domains(pcie);
0749 if (err)
0750 return err;
0751
0752 pcie->irq = platform_get_irq(pdev, 0);
0753 if (pcie->irq < 0)
0754 return pcie->irq;
0755
0756 irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
0757
0758 return 0;
0759 }
0760
0761 static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
0762 {
0763 struct device *dev = pcie->dev;
0764 struct platform_device *pdev = to_platform_device(dev);
0765 struct resource *regs;
0766 int ret;
0767
0768 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
0769 if (!regs)
0770 return -EINVAL;
0771 pcie->base = devm_ioremap_resource(dev, regs);
0772 if (IS_ERR(pcie->base)) {
0773 dev_err(dev, "failed to map register base\n");
0774 return PTR_ERR(pcie->base);
0775 }
0776
0777 pcie->reg_base = regs->start;
0778
0779 pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
0780 if (IS_ERR(pcie->phy_reset)) {
0781 ret = PTR_ERR(pcie->phy_reset);
0782 if (ret != -EPROBE_DEFER)
0783 dev_err(dev, "failed to get PHY reset\n");
0784
0785 return ret;
0786 }
0787
0788 pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
0789 if (IS_ERR(pcie->mac_reset)) {
0790 ret = PTR_ERR(pcie->mac_reset);
0791 if (ret != -EPROBE_DEFER)
0792 dev_err(dev, "failed to get MAC reset\n");
0793
0794 return ret;
0795 }
0796
0797 pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
0798 if (IS_ERR(pcie->phy)) {
0799 ret = PTR_ERR(pcie->phy);
0800 if (ret != -EPROBE_DEFER)
0801 dev_err(dev, "failed to get PHY\n");
0802
0803 return ret;
0804 }
0805
0806 pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
0807 if (pcie->num_clks < 0) {
0808 dev_err(dev, "failed to get clocks\n");
0809 return pcie->num_clks;
0810 }
0811
0812 return 0;
0813 }
0814
/**
 * mtk_pcie_power_up() - Power up PHY, MAC and clocks
 * @pcie: PCIe port information
 *
 * Ordering: release the PHY reset, init and power on the PHY, release the
 * MAC reset, enable runtime PM, then enable all clocks. The error unwind
 * reverses exactly that order.
 *
 * Return: 0 on success or a negative errno.
 */
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(pcie->phy_reset);

	err = phy_init(pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n");
		goto err_phy_init;
	}

	err = phy_power_on(pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n");
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n");
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(pcie->mac_reset);
	phy_power_off(pcie->phy);
err_phy_on:
	phy_exit(pcie->phy);
err_phy_init:
	reset_control_assert(pcie->phy_reset);

	return err;
}
0861
/* Power down in reverse order of mtk_pcie_power_up(). */
static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	reset_control_assert(pcie->mac_reset);

	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
	reset_control_assert(pcie->phy_reset);
}
0874
/**
 * mtk_pcie_setup() - Full port initialization sequence
 * @pcie: PCIe port information
 *
 * Parse resources, force both resets asserted so power-up starts from a
 * known-clean state, power the port up, train the link and set up IRQs.
 *
 * Return: 0 on success or a negative errno.
 */
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * Force both resets asserted (and allow >= 10us for them to
	 * propagate) so that mtk_pcie_power_up() begins from a defined
	 * reset state regardless of bootloader leftovers.
	 */
	reset_control_assert(pcie->phy_reset);
	reset_control_assert(pcie->mac_reset);
	usleep_range(10, 20);

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	/* Try to link up the port */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}
0912
/**
 * mtk_pcie_probe() - Platform driver probe
 * @pdev: platform device
 *
 * Allocate the host bridge with the port's private data embedded, run the
 * full setup sequence, then register the bridge with the PCI core.
 *
 * Return: 0 on success or a negative errno.
 */
static int mtk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_gen3_pcie *pcie;
	struct pci_host_bridge *host;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);

	err = mtk_pcie_setup(pcie);
	if (err)
		return err;

	host->ops = &mtk_pcie_ops;
	host->sysdata = pcie;

	err = pci_host_probe(host);
	if (err) {
		/* Undo what mtk_pcie_setup() established */
		mtk_pcie_irq_teardown(pcie);
		mtk_pcie_power_down(pcie);
		return err;
	}

	return 0;
}
0945
/* Remove: unregister the root bus, then tear down IRQs and power. */
static int mtk_pcie_remove(struct platform_device *pdev)
{
	struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

	/* Hold the rescan lock so no concurrent rescan sees a dying bus */
	pci_lock_rescan_remove();
	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	pci_unlock_rescan_remove();

	mtk_pcie_irq_teardown(pcie);
	mtk_pcie_power_down(pcie);

	return 0;
}
0961
/*
 * Snapshot the interrupt enable state before powering down for suspend.
 * Called from the noirq suspend phase, so a plain raw_spin_lock (without
 * irqsave) is sufficient here.
 */
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		msi_set->saved_irq_state = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}
0979
/*
 * Restore the interrupt enable state captured by mtk_pcie_irq_save().
 * Called from the noirq resume phase after the port is re-trained.
 */
static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
	int i;

	raw_spin_lock(&pcie->irq_lock);

	writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		writel_relaxed(msi_set->saved_irq_state,
			       msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	}

	raw_spin_unlock(&pcie->irq_lock);
}
0997
/**
 * mtk_pcie_turn_off_link() - Request link shutdown and wait for L2 idle
 * @pcie: PCIe port information
 *
 * Set the turn-off bit, then poll the LTSSM until it reports L2 idle
 * (polling every 20us, for up to 50ms).
 *
 * Return: 0 on success, -ETIMEDOUT if L2 idle is never reached.
 */
static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
	u32 val;

	val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
	val |= PCIE_TURN_OFF_LINK;
	writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

	/* Check the link is L2 */
	return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
				  (PCIE_LTSSM_STATE(val) ==
				   PCIE_LTSSM_STATE_L2_IDLE), 20,
				   50 * USEC_PER_MSEC);
}
1012
1013 static int mtk_pcie_suspend_noirq(struct device *dev)
1014 {
1015 struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
1016 int err;
1017 u32 val;
1018
1019
1020 err = mtk_pcie_turn_off_link(pcie);
1021 if (err) {
1022 dev_err(pcie->dev, "cannot enter L2 state\n");
1023 return err;
1024 }
1025
1026
1027 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
1028 val |= PCIE_PE_RSTB;
1029 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
1030
1031 dev_dbg(pcie->dev, "entered L2 states successfully");
1032
1033 mtk_pcie_irq_save(pcie);
1034 mtk_pcie_power_down(pcie);
1035
1036 return 0;
1037 }
1038
/**
 * mtk_pcie_resume_noirq() - Resume the PCIe port
 * @dev: device being resumed
 *
 * Power the port back up, re-train the link, then restore the interrupt
 * enable state saved at suspend time.
 *
 * Return: 0 on success or a negative errno.
 */
static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	err = mtk_pcie_startup_port(pcie);
	if (err) {
		/* Don't leave the port half-powered when link-up fails */
		mtk_pcie_power_down(pcie);
		return err;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}
1058
/* Suspend/resume hooks run in the noirq phase (device IRQs disabled) */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};
1063
/* Devicetree match table */
static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1069
/* Platform driver glue */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie",
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};
1079
/* Register the driver at module init/exit */
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");