0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/kernel.h>
0009 #include <linux/module.h>
0010 #include <linux/pci.h>
0011 #include <linux/bitfield.h>
0012 #include <linux/clk.h>
0013 #include <linux/delay.h>
0014 #include <linux/gpio.h>
0015 #include <linux/init.h>
0016 #include <linux/mbus.h>
0017 #include <linux/slab.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/of_address.h>
0020 #include <linux/of_irq.h>
0021 #include <linux/of_gpio.h>
0022 #include <linux/of_pci.h>
0023 #include <linux/of_platform.h>
0024
0025 #include "../pci.h"
0026 #include "../pci-bridge-emul.h"
0027
0028
0029
0030
/*
 * PCIe unit register offsets. These mirror a subset of the standard PCI
 * config header (device ID, command, BARs, ...) plus Marvell-specific
 * address-decode window and control registers.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_SSDEV_ID_OFF	0x002c
/* Offset of the PCI Express capability block inside the unit registers */
#define PCIE_CAP_PCIEXP		0x0060
/* Offset of the PCI Express error (AER) capability block */
#define PCIE_CAP_PCIERR_OFF	0x0100
/* BAR control registers exist for BARs 1 and 2 only, hence (n - 1) */
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
/* Address decode windows 0-4 share one layout; window 5 is separate */
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
/* Indirect configuration access: write address, then read/write data */
#define PCIE_CONF_ADDR_OFF	0x18f8
#define  PCIE_CONF_ADDR_EN		0x80000000
#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
#define  PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_INT_CAUSE_OFF	0x1900
#define PCIE_INT_UNMASK_OFF	0x1910
/* INTA..INTD summary bits live in bits 24..27; parenthesize the argument */
#define  PCIE_INT_INTX(i)		BIT(24 + (i))
#define  PCIE_INT_PM_PME		BIT(28)
#define  PCIE_INT_ALL_MASK		GENMASK(31, 0)
#define PCIE_CTRL_OFF		0x1a00
#define  PCIE_CTRL_X1_MODE		0x0001
#define  PCIE_CTRL_RC_MODE		BIT(1)
#define  PCIE_CTRL_MASTER_HOT_RESET	BIT(24)
#define PCIE_STAT_OFF		0x1a04
#define  PCIE_STAT_BUS                  0xff00
#define  PCIE_STAT_DEV                  0x1f0000
#define  PCIE_STAT_LINK_DOWN		BIT(0)
/* Slot Power Limit register: value/scale mirror PCI_EXP_SLTCAP fields */
#define PCIE_SSPL_OFF		0x1a0c
#define  PCIE_SSPL_VALUE_SHIFT		0
#define  PCIE_SSPL_VALUE_MASK		GENMASK(7, 0)
#define  PCIE_SSPL_SCALE_SHIFT		8
#define  PCIE_SSPL_SCALE_MASK		GENMASK(9, 8)
#define  PCIE_SSPL_ENABLE		BIT(16)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define  PCIE_DEBUG_SOFT_RESET		BIT(20)
0079
0080 struct mvebu_pcie_port;
0081
0082
/*
 * Per-controller state: the platform device, the array of ports and the
 * CPU-visible apertures (I/O, memory, bus numbers) shared by all ports.
 */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;	/* array of nports entries */
	struct resource io;		/* PCI I/O aperture */
	struct resource realio;		/* I/O range as mapped for the CPU — see probe code */
	struct resource mem;		/* PCI memory aperture */
	struct resource busn;		/* bus number range */
	int nports;
};
0092
/* One mbus address-decode window: CPU base, remapped PCI address, size. */
struct mvebu_pcie_window {
	phys_addr_t base;
	phys_addr_t remap;	/* MVEBU_MBUS_NO_REMAP when no remapping */
	size_t size;		/* 0 means the window is disabled */
};
0098
0099
/* Per-port state for one PCIe root port of the controller. */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;		/* mapped unit registers; NULL if port unused */
	u32 port;			/* marvell,pcie-port DT value */
	u32 lane;			/* marvell,pcie-lane DT value */
	bool is_x4;			/* true when DT num-lanes == 4 */
	int devfn;			/* devfn of this port on the root bus */
	unsigned int mem_target;	/* mbus target/attr for the mem window */
	unsigned int mem_attr;
	unsigned int io_target;		/* -1/-1 when the port has no I/O window */
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct pci_bridge_emul bridge;	/* emulated PCI-to-PCI bridge config space */
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	struct mvebu_pcie_window memwin;	/* currently programmed mem window */
	struct mvebu_pcie_window iowin;		/* currently programmed I/O window */
	u32 saved_pcie_stat;		/* PCIE_STAT_OFF saved across suspend */
	struct resource regs;
	u8 slot_power_limit_value;	/* 0 disables SSPL programming */
	u8 slot_power_limit_scale;
	struct irq_domain *intx_irq_domain;
	raw_spinlock_t irq_lock;	/* protects PCIE_INT_UNMASK_OFF RMW */
	int intx_irq;			/* dedicated "intx" IRQ, <= 0 if absent */
};
0127
/* Write a 32-bit value to a port unit register at offset @reg. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
0132
/* Read a 32-bit value from a port unit register at offset @reg. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
0137
/*
 * True when the port has an I/O window; parse code stores -1 in both
 * fields when no I/O target/attribute could be determined.
 */
static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}
0142
/* Link is up when the LINK_DOWN bit of the status register is clear. */
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
0147
/* Extract the port's local bus number from the PCIE_STAT register. */
static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
{
	return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
}
0152
0153 static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
0154 {
0155 u32 stat;
0156
0157 stat = mvebu_readl(port, PCIE_STAT_OFF);
0158 stat &= ~PCIE_STAT_BUS;
0159 stat |= nr << 8;
0160 mvebu_writel(port, stat, PCIE_STAT_OFF);
0161 }
0162
0163 static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
0164 {
0165 u32 stat;
0166
0167 stat = mvebu_readl(port, PCIE_STAT_OFF);
0168 stat &= ~PCIE_STAT_DEV;
0169 stat |= nr << 16;
0170 mvebu_writel(port, stat, PCIE_STAT_OFF);
0171 }
0172
0173 static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
0174 {
0175 int i;
0176
0177 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
0178 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
0179
0180 for (i = 1; i < 3; i++) {
0181 mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
0182 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
0183 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
0184 }
0185
0186 for (i = 0; i < 5; i++) {
0187 mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
0188 mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
0189 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
0190 }
0191
0192 mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
0193 mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
0194 mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
0195 }
0196
0197
0198
0199
0200
0201
0202
0203
/*
 * Set up decode windows and BARs so the PCIe unit can reach system DRAM:
 * one window per DRAM chip-select, all covered by BAR 1, and BAR 0
 * pointing at the unit's own internal registers.
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	mvebu_pcie_disable_wins(port);

	/* Setup windows for DDR banks, accumulating the total DRAM size. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		/* size-1 in the high 16 bits, attr, target, enable bit */
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to cover all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] at the internal registers, rounded down to a 1 MiB
	 * boundary as required by the BAR address format.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
0248
/* One-time (and post-resume) hardware initialization of a root port. */
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 ctrl, lnkcap, cmd, dev_rev, unmask, sspl;

	/* Setup PCIe controller to Root Complex mode. */
	ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
	ctrl |= PCIE_CTRL_RC_MODE;
	mvebu_writel(port, ctrl, PCIE_CTRL_OFF);

	/*
	 * Advertise the real Maximum Link Width in the Link Capabilities
	 * register: 4 lanes for x4 ports, 1 otherwise. The field starts
	 * at bit 4 (PCI_EXP_LNKCAP_MLW).
	 */
	lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= (port->is_x4 ? 4 : 1) << 4;
	mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);

	/* Disable Root Bridge I/O space, memory space and bus mastering. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/*
	 * Set the class code to PCI-to-PCI bridge, preserving the revision
	 * in the low byte of the register.
	 */
	dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	dev_rev &= ~0xffffff00;
	dev_rev |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/*
	 * Program the Slot Power Limit value and scale taken from DT, or
	 * clear/disable the SSPL register when no limit was specified.
	 */
	sspl = mvebu_readl(port, PCIE_SSPL_OFF);
	sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
	if (port->slot_power_limit_value) {
		sspl |= port->slot_power_limit_value << PCIE_SSPL_VALUE_SHIFT;
		sspl |= port->slot_power_limit_scale << PCIE_SSPL_SCALE_SHIFT;
		sspl |= PCIE_SSPL_ENABLE;
	}
	mvebu_writel(port, sspl, PCIE_SSPL_OFF);

	/* Mask all interrupt sources. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

	/* Clear all interrupt causes. */
	mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

	/*
	 * When a dedicated "intx" interrupt was provided, INTx events are
	 * delivered through it, so do not unmask them here.
	 */
	if (port->intx_irq > 0)
		return;

	/*
	 * Fallback: no dedicated "intx" interrupt, so unmask all legacy
	 * INTA..INTD bits in the summary interrupt unmask register.
	 */
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(0) | PCIE_INT_INTX(1) |
		  PCIE_INT_INTX(2) | PCIE_INT_INTX(3);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
}
0343
0344 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
0345 struct pci_bus *bus,
0346 int devfn);
0347
/*
 * Config-space read for devices below the root port, performed via the
 * indirect address/data register pair of the PCIe unit.
 */
static int mvebu_pcie_child_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				    int size, u32 *val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Accessing a downed link would not produce a valid completion. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch bus/devfn/register into the address register first. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	/* Sub-word reads use the byte lanes selected by 'where'. */
	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
0383
/*
 * Config-space write for devices below the root port, via the indirect
 * address/data register pair (mirror of mvebu_pcie_child_rd_conf()).
 */
static int mvebu_pcie_child_wr_conf(struct pci_bus *bus, u32 devfn,
				    int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;
	void __iomem *conf_data;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Accessing a downed link would not produce a valid completion. */
	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch bus/devfn/register into the address register first. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	/* Sub-word writes use the byte lanes selected by 'where'. */
	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
0419
/* pci_ops for buses below the root: real hardware config accesses. */
static struct pci_ops mvebu_pcie_child_ops = {
	.read = mvebu_pcie_child_rd_conf,
	.write = mvebu_pcie_child_wr_conf,
};
0424
0425
0426
0427
0428
/*
 * Remove the set of power-of-two mbus windows that covers [base,
 * base + size). Must mirror the splitting done by
 * mvebu_pcie_add_windows(): largest power-of-two chunk first.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		size_t sz = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, sz);
		base += sz;
		size -= sz;
	}
}
0440
0441
0442
0443
0444
0445
0446
/*
 * Cover [base, base + size) with mbus windows. mbus windows must be
 * power-of-two sized, so the range is split greedily, largest chunk
 * first. On failure, every window created so far is torn down again so
 * the caller sees all-or-nothing behaviour.
 */
static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  phys_addr_t base, size_t size,
				  phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Roll back the windows added in earlier iterations. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return ret;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap target in lockstep unless remapping is off. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}

	return 0;
}
0480
/*
 * Reconcile the currently programmed window (*cur) with the *desired
 * one: no-op if identical, otherwise delete the old window (if any) and
 * create the new one (if non-empty). *cur tracks what is actually
 * programmed, and is zeroed on deletion or failure.
 */
static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				 unsigned int target, unsigned int attribute,
				 const struct mvebu_pcie_window *desired,
				 struct mvebu_pcie_window *cur)
{
	int ret;

	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return 0;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;
	}

	/* An empty desired window simply means "leave it disabled". */
	if (desired->size == 0)
		return 0;

	ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
				     desired->size, desired->remap);
	if (ret) {
		cur->size = 0;
		cur->base = 0;
		return ret;
	}

	*cur = *desired;
	return 0;
}
0518
/*
 * React to a write to the emulated bridge's I/O base/limit registers by
 * reprogramming the port's mbus I/O window accordingly.
 */
static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* base > limit means I/O forwarding is disabled: clear the window. */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper)
		return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
					     &desired, &port->iowin);

	/*
	 * Decode the bridge I/O window: bits [15:12] of the address come
	 * from the upper nibble of iobase/iolimit, bits [31:16] from the
	 * iobaseupper/iolimitupper registers; the low 12 bits are always
	 * 0x000 for the base and 0xFFF for the limit. The CPU window is
	 * the controller's I/O aperture offset by the PCI I/O address,
	 * remapped back to the PCI address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
				     &port->iowin);
}
0548
/*
 * React to a write to the emulated bridge's memory base/limit registers
 * by reprogramming the port's mbus memory window (no remapping).
 */
static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* base > limit means memory forwarding is disabled: clear window. */
	if (conf->memlimit < conf->membase)
		return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
					     &desired, &port->memwin);

	/*
	 * Bits [31:20] of the window come from membase/memlimit bits
	 * [15:4]; the low 20 bits are 0x00000 for the base and 0xFFFFF
	 * for the limit.
	 */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
				     &port->memwin);
}
0572
/*
 * Reads of the emulated bridge's standard config header that must be
 * backed (fully or partially) by real hardware state.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		/* Command/status live in the hardware register. */
		*value = mvebu_readl(port, PCIE_CMD_OFF);
		break;

	case PCI_PRIMARY_BUS: {
		/*
		 * From the emulated config space take everything except
		 * the secondary bus number, which is read back from the
		 * hardware's local bus number field.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
		val &= ~0xff00;
		val |= mvebu_pcie_get_local_bus_nr(port) << 8;
		*value = val;
		break;
	}

	case PCI_INTERRUPT_LINE: {
		/*
		 * The Bus Reset bit of the Bridge Control register (upper
		 * half of this dword) reflects the hardware hot-reset
		 * state; the rest comes from the emulated config space.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		break;
	}

	default:
		/* Everything else is served from the emulated config space. */
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
0620
/*
 * Reads of the emulated bridge's PCIe capability registers that are
 * backed by the hardware's PCIe capability block at PCIE_CAP_PCIEXP.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * Hide Clock Power Management support and advertise Data
		 * Link Layer Link Active Reporting, which this driver
		 * emulates via the DLLLA bit in LNKSTA below.
		 */
		*value = (mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			  ~PCI_EXP_LNKCAP_CLKPM) | PCI_EXP_LNKCAP_DLLLARC;
		break;

	case PCI_EXP_LNKCTL:
		/* LNKSTA (upper half): report DLLLA from the link state. */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL) |
			 (mvebu_pcie_link_up(port) ?
			  (PCI_EXP_LNKSTA_DLLLA << 16) : 0);
		break;

	case PCI_EXP_SLTCTL: {
		u16 slotctl = le16_to_cpu(bridge->pcie_conf.slotctl);
		u16 slotsta = le16_to_cpu(bridge->pcie_conf.slotsta);
		u32 val = 0;

		/*
		 * ASPL_DISABLE: when no slot power limit is configured,
		 * mirror the emulated bit; otherwise derive it from the
		 * hardware SSPL enable bit.
		 */
		if (!port->slot_power_limit_value)
			val |= slotctl & PCI_EXP_SLTCTL_ASPL_DISABLE;
		else if (!(mvebu_readl(port, PCIE_SSPL_OFF) & PCIE_SSPL_ENABLE))
			val |= PCI_EXP_SLTCTL_ASPL_DISABLE;

		/* Slot status occupies the upper 16 bits of this dword. */
		val |= slotsta << 16;
		*value = val;
		break;
	}

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	case PCI_EXP_DEVCAP2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
		break;

	case PCI_EXP_DEVCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
0695
/*
 * Reads of the emulated bridge's extended (AER) capability registers,
 * passed through to the hardware error capability block.
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/* reg == 0: the AER capability header itself. */
	case 0:
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = mvebu_readl(port, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
0726
/*
 * Writes to the emulated bridge's standard config header that require
 * hardware side effects (command register, windows, bus number, reset).
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
		mvebu_writel(port, new, PCIE_CMD_OFF);
		break;

	case PCI_IO_BASE:
		if ((mask & 0xffff) && mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/*
			 * On failure, mark I/O forwarding as disabled by
			 * making base (0xf0..) greater than limit (0x00..).
			 */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_MEMORY_BASE:
		if (mvebu_pcie_handle_membase_change(port)) {
			/* Same trick: base > limit disables mem forwarding. */
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
			conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
			conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
		}
		break;

	case PCI_IO_BASE_UPPER16:
		if (mvebu_has_ioport(port) &&
		    mvebu_pcie_handle_iobase_change(port)) {
			/* Failure: disable I/O forwarding (base > limit). */
			conf->iobase &= ~0xf0;
			conf->iolimit &= ~0xf0;
			conf->iobase |= 0xf0;
			conf->iobaseupper = cpu_to_le16(0x0000);
			conf->iolimitupper = cpu_to_le16(0x0000);
		}
		break;

	case PCI_PRIMARY_BUS:
		/* Secondary bus number lives in byte 1 of this dword. */
		if (mask & 0xff00)
			mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	case PCI_INTERRUPT_LINE:
		/* Bridge Control's Bus Reset bit drives the hot-reset line. */
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
			else
				ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
			mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
		}
		break;

	default:
		break;
	}
}
0792
/*
 * Writes to the emulated bridge's PCIe capability registers that must
 * reach the hardware capability block or SSPL/interrupt registers.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * Never let Clock Request Enable reach the hardware (the
		 * matching LNKCAP read also hides CLKPM support).
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/*
		 * Auto Slot Power Limit Disable maps onto the SSPL enable
		 * bit, but only when a slot power limit is configured.
		 */
		if ((mask & PCI_EXP_SLTCTL_ASPL_DISABLE) &&
		    port->slot_power_limit_value) {
			u32 sspl = mvebu_readl(port, PCIE_SSPL_OFF);
			if (new & PCI_EXP_SLTCTL_ASPL_DISABLE)
				sspl &= ~PCIE_SSPL_ENABLE;
			else
				sspl |= PCIE_SSPL_ENABLE;
			mvebu_writel(port, sspl, PCIE_SSPL_OFF);
		}
		break;

	case PCI_EXP_RTSTA:
		/*
		 * PME Status is write-one-to-clear; acknowledge it by
		 * clearing the PM/PME bit in the interrupt cause register
		 * (cause bits are cleared by writing 0 to them).
		 */
		if (new & PCI_EXP_RTSTA_PME)
			mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
		break;

	case PCI_EXP_DEVCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
		break;

	case PCI_EXP_LNKCTL2:
		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
		break;

	default:
		break;
	}
}
0854
/*
 * Writes to the emulated bridge's extended (AER) capability registers,
 * passed through to the hardware error capability block.
 */
static void
mvebu_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	/*
	 * Status registers are write-one-to-clear: only forward the bits
	 * the caller actually wrote (mask), then fall through to the
	 * plain pass-through write below.
	 */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG+0:
	case PCI_ERR_HEADER_LOG+4:
	case PCI_ERR_HEADER_LOG+8:
	case PCI_ERR_HEADER_LOG+12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		mvebu_writel(port, new, PCIE_CAP_PCIERR_OFF + reg);
		break;

	default:
		break;
	}
}
0886
/* Hardware-backed accessors for the emulated PCI-to-PCI bridge. */
static const struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.read_base = mvebu_pci_bridge_emul_base_conf_read,
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
	.read_ext = mvebu_pci_bridge_emul_ext_conf_read,
	.write_ext = mvebu_pci_bridge_emul_ext_conf_write,
};
0895
0896
0897
0898
0899
/*
 * Initialize the emulated PCI-to-PCI bridge for one port, seeding its
 * IDs and capabilities from the hardware registers.
 */
static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	unsigned int bridge_flags = PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD;
	struct pci_bridge_emul *bridge = &port->bridge;
	u32 dev_id = mvebu_readl(port, PCIE_DEV_ID_OFF);
	u32 dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
	u32 ssdev_id = mvebu_readl(port, PCIE_SSDEV_ID_OFF);
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	/* PCIe capability version from the hardware's PCI_EXP_FLAGS field */
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = cpu_to_le16(dev_id & 0xffff);
	bridge->conf.device = cpu_to_le16(dev_id >> 16);
	/* Keep only the revision byte; the class code is emulated. */
	bridge->conf.class_revision = cpu_to_le32(dev_rev & 0xff);

	if (mvebu_has_ioport(port)) {
		/* Advertise 32-bit I/O addressing in the bridge window. */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	} else {
		bridge_flags |= PCI_BRIDGE_EMUL_NO_IO_FORWARD;
	}

	/*
	 * Advertise the hardware's capability version plus a slot, so
	 * that the slot registers (power limit, presence) are exposed.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver | PCI_EXP_FLAGS_SLOT);

	/*
	 * Slot capabilities: power limit value/scale from DT and a
	 * physical slot number derived from the port index (1-based).
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(
		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, port->slot_power_limit_value) |
		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, port->slot_power_limit_scale) |
		FIELD_PREP(PCI_EXP_SLTCAP_PSN, port->port+1));
	/* Report the slot as occupied. */
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	bridge->subsystem_vendor_id = ssdev_id & 0xffff;
	bridge->subsystem_id = ssdev_id >> 16;
	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, bridge_flags);
}
0954
/* Recover the driver state stashed in the ARM pci_sys_data. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
0959
0960 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
0961 struct pci_bus *bus,
0962 int devfn)
0963 {
0964 int i;
0965
0966 for (i = 0; i < pcie->nports; i++) {
0967 struct mvebu_pcie_port *port = &pcie->ports[i];
0968
0969 if (!port->base)
0970 continue;
0971
0972 if (bus->number == 0 && port->devfn == devfn)
0973 return port;
0974 if (bus->number != 0 &&
0975 bus->number >= port->bridge.conf.secondary_bus &&
0976 bus->number <= port->bridge.conf.subordinate_bus)
0977 return port;
0978 }
0979
0980 return NULL;
0981 }
0982
0983
0984 static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
0985 int where, int size, u32 val)
0986 {
0987 struct mvebu_pcie *pcie = bus->sysdata;
0988 struct mvebu_pcie_port *port;
0989
0990 port = mvebu_pcie_find_port(pcie, bus, devfn);
0991 if (!port)
0992 return PCIBIOS_DEVICE_NOT_FOUND;
0993
0994 return pci_bridge_emul_conf_write(&port->bridge, where, size, val);
0995 }
0996
0997
0998 static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
0999 int size, u32 *val)
1000 {
1001 struct mvebu_pcie *pcie = bus->sysdata;
1002 struct mvebu_pcie_port *port;
1003
1004 port = mvebu_pcie_find_port(pcie, bus, devfn);
1005 if (!port)
1006 return PCIBIOS_DEVICE_NOT_FOUND;
1007
1008 return pci_bridge_emul_conf_read(&port->bridge, where, size, val);
1009 }
1010
/* pci_ops for the root bus: accesses go to the emulated bridges. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
1015
/* irq_chip mask: clear this INTx bit in the interrupt unmask register. */
static void mvebu_pcie_intx_irq_mask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	/* irq_lock serializes the read-modify-write of the unmask register. */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask &= ~PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1029
/* irq_chip unmask: set this INTx bit in the interrupt unmask register. */
static void mvebu_pcie_intx_irq_unmask(struct irq_data *d)
{
	struct mvebu_pcie_port *port = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 unmask;

	/* irq_lock serializes the read-modify-write of the unmask register. */
	raw_spin_lock_irqsave(&port->irq_lock, flags);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	unmask |= PCIE_INT_INTX(hwirq);
	mvebu_writel(port, unmask, PCIE_INT_UNMASK_OFF);
	raw_spin_unlock_irqrestore(&port->irq_lock, flags);
}
1043
/* irq_chip for the four legacy INTx interrupts of one port. */
static struct irq_chip intx_irq_chip = {
	.name = "mvebu-INTx",
	.irq_mask = mvebu_pcie_intx_irq_mask,
	.irq_unmask = mvebu_pcie_intx_irq_unmask,
};
1049
/* irq_domain map: INTx lines are level-triggered. */
static int mvebu_pcie_intx_irq_map(struct irq_domain *h,
				   unsigned int virq, irq_hw_number_t hwirq)
{
	struct mvebu_pcie_port *port = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, port);

	return 0;
}
1061
/* Single-cell DT interrupt specifier: the cell is the INTx number. */
static const struct irq_domain_ops mvebu_pcie_intx_irq_domain_ops = {
	.map = mvebu_pcie_intx_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
1066
/*
 * Create the linear INTx IRQ domain for a port, using the port's first
 * DT child node as the interrupt controller node.
 */
static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
{
	struct device *dev = &port->pcie->pdev->dev;
	struct device_node *pcie_intc_node;

	raw_spin_lock_init(&port->irq_lock);

	pcie_intc_node = of_get_next_child(port->dn, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found for %s\n", port->name);
		return -ENODEV;
	}

	port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						      &mvebu_pcie_intx_irq_domain_ops,
						      port);
	/* The domain keeps its own reference to the node. */
	of_node_put(pcie_intc_node);
	if (!port->intx_irq_domain) {
		dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
		return -ENOMEM;
	}

	return 0;
}
1091
/*
 * Chained handler for the dedicated "intx" interrupt: demultiplex the
 * pending, unmasked INTA..INTD bits into the port's INTx domain.
 */
static void mvebu_pcie_irq_handler(struct irq_desc *desc)
{
	struct mvebu_pcie_port *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct device *dev = &port->pcie->pdev->dev;
	u32 cause, unmask, status;
	int i;

	chained_irq_enter(chip, desc);

	/* Only act on sources that are both pending and unmasked. */
	cause = mvebu_readl(port, PCIE_INT_CAUSE_OFF);
	unmask = mvebu_readl(port, PCIE_INT_UNMASK_OFF);
	status = cause & unmask;

	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(status & PCIE_INT_INTX(i)))
			continue;

		if (generic_handle_domain_irq(port->intx_irq_domain, i) == -EINVAL)
			dev_err_ratelimited(dev, "unexpected INT%c IRQ\n", (char)i+'A');
	}

	chained_irq_exit(chip, desc);
}
1117
1118 static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1119 {
1120
1121 if (dev->bus->number == 0)
1122 return 0;
1123
1124 return of_irq_parse_and_map_pci(dev, slot, pin);
1125 }
1126
/*
 * Resource alignment hook for devices on the root bus: bridge windows
 * must be programmable as mbus windows, so align them generously.
 */
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	/* Only the emulated bridges on bus 0 need special alignment. */
	if (dev->bus->number != 0)
		return start;

	/*
	 * Align to at least 64 KiB (I/O) or 1 MiB (memory) — the
	 * granularity of the bridge base/limit registers — and to the
	 * largest power of two not exceeding the size, so the range can
	 * be covered by power-of-two mbus windows.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}
1156
/*
 * Map a port's unit registers from its DT "reg" property; also fills
 * port->regs. Returns an ERR_PTR-encoded pointer on failure.
 */
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
					      struct device_node *np,
					      struct mvebu_pcie_port *port)
{
	int ret = 0;

	ret = of_address_to_resource(np, 0, &port->regs);
	if (ret)
		return (void __iomem *)ERR_PTR(ret);

	return devm_ioremap_resource(&pdev->dev, &port->regs);
}
1169
/*
 * Helpers to decode the Marvell "ranges" encoding: the space type sits
 * in bits [25:24] of the flags cell, and the mbus target/attribute are
 * packed into the top two bytes of the 64-bit CPU address.
 */
#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
#define    DT_TYPE_IO                 0x1
#define    DT_TYPE_MEM32              0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
1175
/*
 * Walk the controller node's "ranges" property and return the mbus
 * target/attribute of the entry matching this port's slot and resource
 * type (I/O or MEM32). *tgt/*attr are preset to -1 so callers can use
 * mvebu_has_ioport() even after a failed lookup.
 */
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	/* PCI address cells (na) and size cells (ns) of the child space. */
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		/* Second PCI address cell encodes the slot number. */
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}
1218
1219 static int mvebu_pcie_suspend(struct device *dev)
1220 {
1221 struct mvebu_pcie *pcie;
1222 int i;
1223
1224 pcie = dev_get_drvdata(dev);
1225 for (i = 0; i < pcie->nports; i++) {
1226 struct mvebu_pcie_port *port = pcie->ports + i;
1227 if (!port->base)
1228 continue;
1229 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
1230 }
1231
1232 return 0;
1233 }
1234
1235 static int mvebu_pcie_resume(struct device *dev)
1236 {
1237 struct mvebu_pcie *pcie;
1238 int i;
1239
1240 pcie = dev_get_drvdata(dev);
1241 for (i = 0; i < pcie->nports; i++) {
1242 struct mvebu_pcie_port *port = pcie->ports + i;
1243 if (!port->base)
1244 continue;
1245 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
1246 mvebu_pcie_setup_hw(port);
1247 }
1248
1249 return 0;
1250 }
1251
/*
 * devm action callback: drop the clock reference taken with
 * of_clk_get_by_name() in mvebu_pcie_parse_port().
 */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
1258
/*
 * Parse the DT node of one PCIe root port and fill in @port.
 *
 * Return value:
 *   1  - the port is valid and must be used,
 *   0  - the port must be skipped (non-fatal DT/resource problem);
 *        any name buffers allocated here are released again,
 *  <0  - fatal error (-ENOMEM, -EPROBE_DEFER, ...), aborts the probe.
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
		struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	u32 slot_power_limit;
	int reset_gpio, ret;
	u32 num_lanes;

	port->pcie = pcie;

	/* The hardware port number is mandatory. */
	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* The lane number is optional and defaults to 0. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	/* Record whether this is a 4-lane link. */
	if (!of_property_read_u32(child, "num-lanes", &num_lanes) && num_lanes == 4)
		port->is_x4 = true;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;
	/* Only function 0 is supported. */
	if (PCI_FUNC(port->devfn) != 0) {
		dev_err(dev, "%s: invalid function number, must be zero\n",
			port->name);
		goto skip;
	}

	/* A memory window target/attribute is mandatory for the port. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/*
	 * The I/O window is optional: when the host has no I/O aperture
	 * at all, mark the port's I/O target/attribute as unusable (-1).
	 */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	/*
	 * The dedicated "intx" interrupt is optional. Without it the
	 * driver falls back to the summary interrupt path and legacy
	 * INTx sources cannot be masked individually (see warning below).
	 */
	port->intx_irq = of_irq_get_byname(child, "intx");
	if (port->intx_irq == -EPROBE_DEFER) {
		ret = port->intx_irq;
		goto err;
	}
	if (port->intx_irq <= 0) {
		dev_warn(dev, "%s: legacy INTx interrupts cannot be masked individually, "
			      "%pOF does not contain intx interrupt\n",
			 port->name, child);
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		/*
		 * Request the line with the reset initially asserted;
		 * mvebu_pcie_powerup() deasserts it later.
		 */
		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			/* Defer is fatal; any other failure skips the port. */
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	/* Optional slot power limit, reported in milliwatts when set. */
	slot_power_limit = of_pci_get_slot_power_limit(child,
				&port->slot_power_limit_value,
				&port->slot_power_limit_scale);
	if (slot_power_limit)
		dev_info(dev, "%s: Slot power limit %u.%uW\n",
			 port->name,
			 slot_power_limit / 1000,
			 (slot_power_limit / 100) % 10);

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Release the clock reference automatically on device teardown. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* Skipped port: release its name buffers; the caller ignores it. */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
1400
1401
1402
1403
1404
1405
1406 static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
1407 {
1408 int ret;
1409
1410 ret = clk_prepare_enable(port->clk);
1411 if (ret < 0)
1412 return ret;
1413
1414 if (port->reset_gpio) {
1415 u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
1416
1417 of_property_read_u32(port->dn, "reset-delay-us",
1418 &reset_udelay);
1419
1420 udelay(100);
1421
1422 gpiod_set_value_cansleep(port->reset_gpio, 0);
1423 msleep(reset_udelay / 1000);
1424 }
1425
1426 return 0;
1427 }
1428
1429
1430
1431
1432
/*
 * Power down a port: assert its reset line (no-op when port->reset_gpio
 * is NULL) and stop the port clock. Counterpart of mvebu_pcie_powerup().
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
1439
1440
1441
1442
1443
1444
1445 static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
1446 {
1447 struct device *dev = &pcie->pdev->dev;
1448 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1449 int ret;
1450
1451
1452 mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
1453 if (resource_size(&pcie->mem) == 0) {
1454 dev_err(dev, "invalid memory aperture size\n");
1455 return -EINVAL;
1456 }
1457
1458 pcie->mem.name = "PCI MEM";
1459 pci_add_resource(&bridge->windows, &pcie->mem);
1460 ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
1461 if (ret)
1462 return ret;
1463
1464
1465 mvebu_mbus_get_pcie_io_aperture(&pcie->io);
1466
1467 if (resource_size(&pcie->io) != 0) {
1468 pcie->realio.flags = pcie->io.flags;
1469 pcie->realio.start = PCIBIOS_MIN_IO;
1470 pcie->realio.end = min_t(resource_size_t,
1471 IO_SPACE_LIMIT - SZ_64K,
1472 resource_size(&pcie->io) - 1);
1473 pcie->realio.name = "PCI I/O";
1474
1475 ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
1476 if (ret)
1477 return ret;
1478
1479 pci_add_resource(&bridge->windows, &pcie->realio);
1480 ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
1481 if (ret)
1482 return ret;
1483 }
1484
1485 return 0;
1486 }
1487
/*
 * Probe the mvebu PCIe controller: request apertures, parse all root
 * port child nodes, bring each usable port up (clock, reset, register
 * mapping, emulated bridge, optional INTx domain, hardware setup) and
 * finally register the host bridge. A failure on one port is not fatal:
 * the port is left with port->base == NULL and skipped thereafter.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	/* Allocate one port slot per available child node (upper bound). */
	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse DT; only valid ports (ret == 1) are kept. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and initialize each parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		/* Set up the emulated PCI-to-PCI bridge config space. */
		ret = mvebu_pci_bridge_emul_init(port);
		if (ret < 0) {
			dev_err(dev, "%s: cannot init emulated bridge\n",
				port->name);
			devm_iounmap(dev, port->base);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		/*
		 * With a dedicated "intx" interrupt, register a per-port
		 * INTx irq domain and demultiplex through the chained
		 * handler; otherwise INTx handling stays on the fallback
		 * path warned about in mvebu_pcie_parse_port().
		 */
		if (irq > 0) {
			ret = mvebu_pcie_init_irq_domain(port);
			if (ret) {
				dev_err(dev, "%s: cannot init irq domain\n",
					port->name);
				pci_bridge_emul_cleanup(&port->bridge);
				devm_iounmap(dev, port->base);
				port->base = NULL;
				mvebu_pcie_powerdown(port);
				continue;
			}
			irq_set_chained_handler_and_data(irq,
							 mvebu_pcie_irq_handler,
							 port);
		}

		/*
		 * Program the port hardware and place the root port at
		 * device number 1 on local bus 0. NOTE(review): device
		 * number 1 presumably keeps the link partner from
		 * shadowing the emulated bridge at slot 0 -- confirm
		 * against mvebu_pcie_set_local_dev_nr()'s definition.
		 */
		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pcie_set_local_bus_nr(port, 0);
	}

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->child_ops = &mvebu_pcie_child_ops;
	bridge->align_resource = mvebu_pcie_align_resource;
	bridge->map_irq = mvebu_pcie_map_irq;

	return pci_host_probe(bridge);
}
1666
/*
 * Tear down the controller: remove the PCI bus, then quiesce and power
 * down each port that was successfully brought up during probe.
 */
static int mvebu_pcie_remove(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 cmd, sspl;
	int i;

	/* Remove the PCI bus with all its devices. */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];
		int irq = port->intx_irq;

		/* Ports that failed during probe have no mapping. */
		if (!port->base)
			continue;

		/* Disable I/O and memory decode and bus mastering. */
		cmd = mvebu_readl(port, PCIE_CMD_OFF);
		cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		mvebu_writel(port, cmd, PCIE_CMD_OFF);

		/* Mask all interrupt sources. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_UNMASK_OFF);

		/* Clear all pending interrupt causes. */
		mvebu_writel(port, ~PCIE_INT_ALL_MASK, PCIE_INT_CAUSE_OFF);

		if (irq > 0)
			irq_set_chained_handler_and_data(irq, NULL, NULL);

		/* Remove the INTx irq domain if probe registered one. */
		if (port->intx_irq_domain)
			irq_domain_remove(port->intx_irq_domain);

		/* Free the emulated bridge config space. */
		pci_bridge_emul_cleanup(&port->bridge);

		/* Disable the slot power limit advertisement. */
		sspl = mvebu_readl(port, PCIE_SSPL_OFF);
		sspl &= ~(PCIE_SSPL_VALUE_MASK | PCIE_SSPL_SCALE_MASK | PCIE_SSPL_ENABLE);
		mvebu_writel(port, sspl, PCIE_SSPL_OFF);

		/* Disable the port's address windows. */
		mvebu_pcie_disable_wins(port);

		/* Drop any mbus windows this port had registered. */
		if (port->iowin.size)
			mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
		if (port->memwin.size)
			mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);

		/* Assert reset (if present) and gate the port clock. */
		mvebu_pcie_powerdown(port);
	}

	return 0;
}
1728
/*
 * Supported SoC families. No per-compatible match data is used; all
 * entries share the same probe path.
 */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
1736
/* Save/restore port state in the noirq phase of system sleep. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
1740
/* Platform driver glue and module metadata. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
	.remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Marvell EBU PCIe controller");
MODULE_LICENSE("GPL v2");