0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030 #include <linux/delay.h>
0031 #include <linux/types.h>
0032 #include <linux/kernel.h>
0033 #include <linux/spinlock.h>
0034 #include <linux/init.h> /* for __init */
0035 #include <linux/pci.h>
0036 #include <linux/ioport.h>
0037 #include <linux/slab.h>
0038
0039 #include <asm/byteorder.h>
0040 #include <asm/pdc.h>
0041 #include <asm/pdcpat.h>
0042 #include <asm/page.h>
0043
0044 #include <asm/ropes.h>
0045 #include <asm/hardware.h> /* for register_parisc_driver() stuff */
0046 #include <asm/parisc-device.h>
0047 #include <asm/io.h> /* read/write stuff */
0048
0049 #include "iommu.h"
0050
/*
 * Per-area debug switches.  Define (instead of #undef) any of these to
 * turn the matching DBG* macro into a printk; otherwise it compiles away.
 */
#undef DEBUG_LBA	/* general flow */
#undef DEBUG_LBA_PORT	/* I/O-port space accesses */
#undef DEBUG_LBA_CFG	/* PCI config-space accesses */
#undef DEBUG_LBA_PAT	/* PAT firmware resource discovery */

#undef FBB_SUPPORT	/* fast back-to-back support (disabled) */


#ifdef DEBUG_LBA
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

#ifdef DEBUG_LBA_PORT
#define DBG_PORT(x...) printk(x)
#else
#define DBG_PORT(x...)
#endif

#ifdef DEBUG_LBA_CFG
#define DBG_CFG(x...) printk(x)
#else
#define DBG_CFG(x...)
#endif

#ifdef DEBUG_LBA_PAT
#define DBG_PAT(x...) printk(x)
#else
#define DBG_PAT(x...)
#endif


#define MODULE_NAME "LBA"

/* Physical base of the legacy (Astro) I/O-port space window. */
#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
/* Mapped once on the first non-PAT LBA probed; shared by all Astro LBAs. */
static void __iomem *astro_iop_base __read_mostly;

/*
 * Scratch global used as the target of "read back to flush a posted write"
 * sequences throughout this file; its value is never consumed.
 */
static u32 lba_t32;

/* lba_device.flags bit: trust probe results and skip per-device probing. */
#define LBA_FLAG_SKIP_PROBE 0x10

#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
0110
/* Recover the owning lba_device from its embedded pci_hba_data member. */
static inline struct lba_device *LBA_DEV(struct pci_hba_data *hba)
{
	return container_of(hba, struct lba_device, hba);
}
0115
0116
0117
0118
0119
/* Maximum number of secondary buses supported behind one LBA. */
#define LBA_MAX_NUM_BUSES 8


/*
 * READ_U / WRITE_U use the __raw_* accessors: native byte order, no
 * ordering guarantees.  READ_REG / WRITE_REG use readX/writeX, which on
 * this platform provide the ordering required for the config-cycle
 * sequences below.
 */
#define READ_U8(addr) __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr) __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)

#define READ_REG8(addr) readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr) writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)


/*
 * A config-space "token": bus number in bits 23..16, devfn in bits 15..8.
 * The low 8 bits are left for the register offset, OR'd in by callers.
 */
#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)


/* Derive the rope (IOC port) number from an LBA's HPA address. */
#define ROPES_PER_IOC 8
#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
0156
0157
0158 static void
0159 lba_dump_res(struct resource *r, int d)
0160 {
0161 int i;
0162
0163 if (NULL == r)
0164 return;
0165
0166 printk(KERN_DEBUG "(%p)", r->parent);
0167 for (i = d; i ; --i) printk(" ");
0168 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
0169 (long)r->start, (long)r->end, r->flags);
0170 lba_dump_res(r->child, d+2);
0171 lba_dump_res(r->sibling, d);
0172 }
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189 static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
0190 {
0191 u8 first_bus = d->hba.hba_bus->busn_res.start;
0192 u8 last_sub_bus = d->hba.hba_bus->busn_res.end;
0193
0194 if ((bus < first_bus) ||
0195 (bus > last_sub_bus) ||
0196 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
0197 return 0;
0198 }
0199
0200 return 1;
0201 }
0202
0203
0204
/*
 * Save the LBA error/status/arbitration registers and put the chip into a
 * state where a config probe of an absent device won't wedge the system.
 * Expects u32 locals error_config, status_control and arb_mask in the
 * expanding scope; LBA_CFG_RESTORE() writes them back.
 *
 * NOTE(review): the original macro body was split by blank lines missing
 * their line-continuation backslashes (a stripped-comment artifact), which
 * terminated the #define mid-body and left stray `\` tokens at file scope.
 * Rewritten here as a single well-formed macro with identical register
 * operations in the identical order.
 */
#define LBA_CFG_SETUP(d, tok) {						\
	/* Save contents of error config register. */			\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
	/* Save contents of status control register. */			\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
	/* Save contents of the arbitration mask register. */		\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);		\
	/*								\
	 * Restrict arbitration to the bus itself (bit 0) so no other	\
	 * master runs cycles while the probe is in progress.		\
	 */								\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);		\
	/*								\
	 * Enable SMART mode so a master abort during the probe is	\
	 * logged rather than treated as a fatal bus error.		\
	 */								\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
}
0230
0231
/*
 * Probe for a device at config token @tok: set up a config cycle for its
 * vendor-ID register and issue a dummy write.  If no device responds, the
 * LBA latches a master abort which LBA_CFG_MASTER_ABORT_CHECK() detects.
 * Each write is followed by a read of LBA_PCI_CFG_ADDR to flush it
 * (read-after-write ordering); the value read is discarded via lba_t32.
 *
 * NOTE(review): rebuilt as a well-formed macro — the original had interior
 * blank lines without continuation backslashes (stripped-comment artifact)
 * that broke the #define.  Operations and order are unchanged.
 */
#define LBA_CFG_PROBE(d, tok) {						\
	/* Select the vendor-ID register of the target device. */	\
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Read address register to flush the write. */			\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/* Generate a config-write cycle (all ones to vendor ID). */	\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);		\
	/* Flush the data write; an abort is latched if nothing answered. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
/* Bits in LBA_ERROR_STATUS (low five bits are the error class). */
#define LBA_MASTER_ABORT_ERROR 0xc
#define LBA_FATAL_ERROR 0x10

/*
 * After LBA_CFG_PROBE(), inspect the LBA error-status register.  If any
 * error is latched, set @error to 1; non-fatal errors (e.g. the expected
 * master abort from an empty slot) are cleared so later cycles start
 * clean, while fatal errors are left logged.  Expects the u32 local
 * status_control (saved by LBA_CFG_SETUP) in the expanding scope.
 *
 * NOTE(review): rebuilt as a well-formed macro — the original had interior
 * blank lines without continuation backslashes (stripped-comment artifact)
 * that broke the #define.  Operations and order are unchanged.
 */
#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {		\
	u32 error_status = 0;						\
	/* Arm error-log clearing, then read what the probe latched. */	\
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
	error_status = READ_REG32(base + LBA_ERROR_STATUS);		\
	if ((error_status & 0x1f) != 0) {				\
		/* A PCI error was detected during the probe. */	\
		error = 1;						\
		if ((error_status & LBA_FATAL_ERROR) == 0) {		\
			/* Non-fatal (e.g. master abort): clear the log. */ \
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
		}							\
	}								\
}
0305
/*
 * TR4.0+ (Mercury/Quicksilver) address setup: a plain write of the
 * dword-aligned config address — no flush read required on these parts.
 */
#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);

/*
 * Pre-TR4 address setup: write the dword-aligned config address, then
 * read it back to flush the posted write before touching the data port.
 *
 * NOTE(review): rebuilt as a well-formed macro — the original had interior
 * blank lines without continuation backslashes (stripped-comment artifact)
 * that broke the #define.  Operations and order are unchanged.
 */
#define LBA_CFG_ADDR_SETUP(d, addr) {					\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
	/* Read address register to flush the write. */			\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}
0317
0318
/*
 * Undo LBA_CFG_SETUP(): write back the saved status-control, error-config
 * and arbitration-mask registers (re-enabling normal arbitration and
 * turning SMART mode back off).  Expects the same u32 locals as
 * LBA_CFG_SETUP in the expanding scope.
 *
 * NOTE(review): rebuilt as a well-formed macro — the original had interior
 * blank lines without continuation backslashes (stripped-comment artifact)
 * that broke the #define.  Operations and order are unchanged.
 */
#define LBA_CFG_RESTORE(d, base) {					\
	/* Restore status control register. */				\
	WRITE_REG32(status_control, base + LBA_STAT_CTL);		\
	/* Restore error config register. */				\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);		\
	/* Restore arbitration mask register. */			\
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);			\
}
0333
0334
0335
/*
 * "Slow" protected config-space read: save and tweak the LBA error
 * registers, probe whether a device answers at @tok, and only then issue
 * the real read of @size bytes at register @reg.  Returns ~0U (what real
 * hardware returns for an absent device) if the probe latched an error.
 * The exact register sequence (setup / probe / abort-check / restore)
 * is order-critical — do not reorder.
 */
static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	/* Referenced by name inside the LBA_CFG_* macros below. */
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		/* Sub-word reads select the proper byte lane off the port. */
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return(data);
}
0361
0362
/*
 * pci_ops.read for Elroy LBAs.  Validates the request, then performs the
 * read via the protected lba_rd_cfg() path.
 */
static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	/* The root bus is addressed as local bus 0. */
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/*
	 * NOTE(review): this bare block is entered unconditionally, so the
	 * slow lba_rd_cfg() path always runs and everything after the
	 * block is unreachable.  Comments were stripped from this file;
	 * this looks like a deliberate workaround that keeps the fast
	 * path below for reference — confirm before "fixing".
	 */
	{
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	/* --- unreachable fast path below (see note above) --- */
	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* Device not present: return all ones like real hardware. */
		*data = ~0U;
		return(0);
	}

	/* Direct config cycle without the save/probe/restore dance. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}
0402
0403
/*
 * Protected config-space write: save/tweak the LBA error registers, issue
 * the write of @size bytes at register @reg of device @tok, then check
 * for a master abort and restore the saved registers.  Any error detected
 * is swallowed (writes to absent devices are silently dropped).
 */
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	/* Referenced by name inside the LBA_CFG_* macros below. */
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}
0423
0424
0425
0426
0427
0428
0429
/*
 * pci_ops.write for Elroy LBAs.  Uses the protected lba_wr_cfg() path
 * unless the device may be written directly (LBA_SKIP_PROBE set and the
 * device is known present).
 */
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	/* The root bus is addressed as local bus 0. */
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Slow, protected path: probe before writing. */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		/* Device not present: drop the write. */
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
		return 1;
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Direct config write without the save/probe/restore dance. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		break;
	}
	/* Flush the posted write (value discarded). */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
0467
0468
/* Config accessors for Elroy (pre-TR4) LBAs. */
static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =	elroy_cfg_write,
};
0473
0474
0475
0476
0477
0478
0479
0480 static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
0481 {
0482 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
0483 u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
0484 u32 tok = LBA_CFG_TOK(local_bus, devfn);
0485 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
0486
0487 if ((pos > 255) || (devfn > 255))
0488 return -EINVAL;
0489
0490 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
0491 switch(size) {
0492 case 1:
0493 *data = READ_REG8(data_reg + (pos & 3));
0494 break;
0495 case 2:
0496 *data = READ_REG16(data_reg + (pos & 2));
0497 break;
0498 case 4:
0499 *data = READ_REG32(data_reg); break;
0500 break;
0501 }
0502
0503 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
0504 return 0;
0505 }
0506
0507
0508
0509
0510
0511
/*
 * pci_ops.write for Mercury/Quicksilver (TR4.0+) LBAs: direct config
 * write on the proper byte lane, then a flush read of the address port.
 */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	/* The root bus is addressed as local bus 0. */
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* Flush the posted write (value discarded). */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
0541
/* Config accessors for Mercury/Quicksilver (TR4.0+) LBAs. */
static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};
0546
0547
/* pci_bios_ops.init hook — nothing to do for LBA beyond a debug trace. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}
0553
0554
0555 #ifdef CONFIG_64BIT
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
/*
 * Shrink the range in @new so it no longer overlaps any resource already
 * registered under @root.  Only the first colliding sibling is examined.
 *
 * Returns 1 when @new is entirely swallowed by an existing resource
 * (caller should drop it); 0 when @new was left alone or merely trimmed.
 * Note the trimming updates @new in place.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	/* Empty/inverted range, range below root, or nothing registered. */
	if (end <= start || start < root->start || !tmp)
		return 0;

	/* Skip siblings that end entirely before our start. */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* No possible overlap remains. */
	if (!tmp)  return 0;

	/* First candidate starts at or beyond our end: no overlap. */
	if (tmp->start >= end) return 0;

	if (tmp->start <= start) {
		/* Existing resource covers our start: move start past it. */
		new->start = tmp->end + 1;
		if (tmp->end >= end) {
			/* Fully contained — nothing left of @new. */
			return 1;
		}
	}

	if (tmp->end < end ) {
		/* Existing resource overlaps our tail: trim the end. */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end );

	return 0;
}
0614
0615
0616
0617
0618
0619
0620
/*
 * When the PAT-reported LMMIO length disagrees with the length implied by
 * the LBA's mask register, try to extend [start,end] by the mask-derived
 * length (capped at 256 MB), then shrink it back so it does not collide
 * with any already-registered iomem resource.  Returns the adjusted end.
 * Only applies to mako-class (and newer) CPUs; older machines keep the
 * PAT value unchanged.
 */
static unsigned long
extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
{
	struct resource *tmp;

	/* Only mako-class machines need/benefit from the extension. */
	if (boot_cpu_data.cpu_type < mako)
		return end;

	pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
		end - start, lba_len);

	/* Mask value is (size - 1); cap the extension at 256 MB. */
	lba_len = min(lba_len+1, 256UL*1024*1024);

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);

	end += lba_len;
	if (end < start) /* fix overflow... */
		end = -1ULL;

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);

	/* Clip the extended range against every registered iomem sibling. */
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		pr_debug("LBA: testing %pR\n", tmp);
		if (tmp->start == start)
			continue; /* ignore our own (old) entry */
		if (tmp->end < start)
			continue;
		if (tmp->start > end)
			continue;
		if (end >= tmp->start)
			end = tmp->start - 1;
	}

	pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);

	return end;
}
0662
0663 #else
0664 #define truncate_pat_collision(r,n) (0)
0665 #endif
0666
/*
 * Claim each window resource of a PCI-PCI bridge into the resource tree.
 * Windows that cannot be claimed (or that start at 0) are zeroed out so
 * later allocation code treats them as unassigned.
 */
static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
{
	int idx;
	struct resource *r;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		r = &dev->resource[idx];
		if (!r->flags)
			continue;
		if (r->parent)	/* already claimed */
			continue;
		if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
			/*
			 * Invalid or unclaimable window: wipe it so the
			 * generic code can reassign it later.
			 */
			r->start = r->end = 0;
			r->flags = 0;
		}
	}
}
0690
/*
 * Depth-first walk of a bus subtree, claiming the bridge windows of every
 * bus that has an upstream bridge (the root bus has bus->self == NULL).
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child;

	if (bus->self)
		pcibios_allocate_bridge_resources(bus->self);
	list_for_each_entry(child, &bus->children, node)
		pcibios_allocate_bus_resources(child);
}
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
/*
 * pci_bios_ops.fixup_bus hook, called once per bus during enumeration.
 * For the root bus it registers the LBA's I/O-port, (e)LMMIO and GMMIO
 * windows with the global resource trees; for child buses it reads the
 * bridge bases and claims bridge windows.  It then claims each device's
 * BARs, initializes PCI-PCI bridges, and wires up IOSAPIC IRQs.
 */
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	if (bus->parent) {
		/* Child bus: pick up the bridge's window registers... */
		pci_read_bridge_bases(bus);
		/* ...and claim the windows into the resource trees. */
		pcibios_allocate_bus_resources(bus);
	} else {
		/* Root bus: register this LBA's address-space windows. */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		/* I/O-port space must succeed: without it the bus is useless. */
		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.flags) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				/* Non-fatal: log and continue without it. */
				printk("FAILED: lba_fixup_bus() request for "
						"elmmio_space [%lx/%lx]\n",
						(long)ldev->hba.elmmio_space.start,
						(long)ldev->hba.elmmio_space.end);
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				/* Non-fatal: log and continue without it. */
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is PAT-only; failure to claim it is fatal. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Claim every assigned BAR of this device. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* Skip unassigned BARs. */
			if (!res->start)
				continue;

			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		 * A device without fast back-to-back capability disables
		 * FBB for the whole bus.
		 */
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* PCI-PCI bridges get bridge init instead of an IRQ. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
			pcibios_init_bridge(dev);
			continue;
		}

		/* Route the device's interrupt through the IOSAPIC. */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/*
	 * NOTE(review): fbb_enable is not declared anywhere in the visible
	 * portion of this file — this #ifdef'd-out code would not compile
	 * as-is; confirm before enabling FBB_SUPPORT.
	 */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;

			/* Enable FBB on the upstream bridge. */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* Root bus: nothing to enable on a bridge. */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Enable parity/SERR reporting (and FBB if decided above). */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}
0863
0864
/* BIOS-level hooks installed for all LBAs during probe. */
static struct pci_bios_ops lba_bios_ops = {
	.init =		lba_bios_init,
	.fixup_bus =	lba_fixup_bus,
};
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
/*
 * Legacy (Astro) I/O-port input accessors.  All Astro LBAs share a single
 * 64 KB port window at astro_iop_base; @addr is the port number and @d is
 * unused here.  The mask parameter is unused by this variant.
 */
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
/*
 * Legacy (Astro) I/O-port output accessors.  After the write, old chips
 * (hw_rev < 3) need a read of a harmless register (LBA_FUNC_ID) to flush
 * the posted write; the value is discarded via lba_t32.  The mask
 * parameter is unused by this variant.
 */
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


/* Port-space accessors used on legacy (non-PAT, Astro-based) systems. */
static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};
0950
0951
0952 #ifdef CONFIG_64BIT
/*
 * On PAT platforms each LBA has its own I/O-port space, sparse-mapped
 * into GMMIO: the port number's upper bits select a 1 KB-spaced page and
 * the low two bits the byte lane within it.
 */
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))


/*
 * PAT I/O-port input accessors: translate the port through the sparse
 * GMMIO mapping of this LBA's iop_base.  Mask parameter unused.
 */
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


/*
 * PAT I/O-port output accessors.  The trailing read of LBA_FUNC_ID
 * flushes the posted write (value discarded via lba_t32).
 */
#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* flush the posted write */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


/* Port-space accessors used on PAT (pdc_pat) platforms. */
static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};
1008
1009
1010
1011
1012
1013
1014
1015
1016
/*
 * Discover this LBA's address-space ranges from PAT firmware.  The cell
 * module is queried twice — once for the processor (PA) view and once
 * for the I/O view of each range — and the results are translated into
 * the hba's bus_num / lmmio / elmmio / gmmio / io_space resources.
 * Failures of the firmware call itself are fatal (panic).
 */
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA view */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO view */
	int i;

	/* The blocks are too large for the stack — heap-allocate them. */
	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* Query firmware for both views; mod[1] is the range count. */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	/* NOTE(review): io_count is read but never used below — the loop
	 * assumes both views report the same number of ranges; confirm. */
	io_count = io_pdc_cell->mod[1];

	/* Both calls must succeed (status is OR of both return codes). */
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/* Ranges start at mod[2], three words (type/start/end) each. */
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;	/* aka finish */
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Convert the PAT range into the matching hba resource. */
		switch(p->type & 0xff) {
		case PAT_PBNUM:		/* PCI bus number range */
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* First LMMIO range goes to lmmio_space... */
			if (!lba_dev->hba.lmmio_space.flags) {
				unsigned long lba_len;

				lba_len = ~READ_REG32(lba_dev->hba.base_addr
						+ LBA_LMMIO_MASK);
				/* PAT and the mask register disagree on
				 * the window length: try to extend. */
				if ((p->end - p->start) != lba_len)
					p->end = extend_lmmio_len(p->start,
						p->end, lba_len);

				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				/* CPU-to-PCI address translation offset. */
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			/* ...the second to elmmio_space. */
			} else if (!lba_dev->hba.elmmio_space.flags) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name  = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:		/* global (>4GB) MMIO window */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name  = lba_dev->hba.gmmio_name;
			r->start  = p->start;
			r->end    = p->end;
			r->flags  = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:		/* non-postable port I/O: unsupported */
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			 * Map the sparse GMMIO alias of this LBA's port
			 * space (see PIOP_TO_GMMIO): 64K ports spread over
			 * 64 MB of GMMIO.
			 */
			lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name  = lba_dev->hba.io_name;
			r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags  = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
1160 #else
1161
1162 #define lba_pat_port_ops lba_astro_port_ops
1163 #define lba_pat_resources(pa_dev, lba_dev)
1164 #endif
1165
1166
1167 extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1168 extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1169
1170
/*
 * Non-PAT (legacy Astro) resource discovery: read the bus-number range
 * from firmware scratch, and let the SBA code carve up the distributed
 * and directed LMMIO ranges.  The #else branches preserve an older
 * register-based scheme that is compiled out (#if 1).
 */
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	/* Legacy systems: CPU and PCI addresses differ only by F-extension. */
	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/*
	 * Firmware stashed the bus range in the scratch register:
	 * low byte = first bus, next byte = last subordinate bus.
	 */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;
	r->flags = IORESOURCE_BUS;

	/* Distributed LMMIO window, split among the ropes by SBA code. */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.lmmio_name;

#if 1
	/* Current method: let the SBA code compute this LBA's share. */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	 * Compiled-out older method: derive the window from the LBA's own
	 * LMMIO base/mask registers and the rope number.
	 * NOTE(review): references mmio_mask, which is not defined in the
	 * visible portion of this file — dead code kept for reference.
	 */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {		/* bit 0 == window enabled */
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears the enable bit */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
		rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/* Each rope gets an equal share of the window. */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;	/* window disabled */
	}
#endif

	/* Directed ("error"/extra) LMMIO window, also handled by SBA code. */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.elmmio_name;

#if 1
	sba_directed_lmmio(pa_dev, r);
#else
	/* Compiled-out older method (see note above re: mmio_mask). */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {		/* bit 0 == window enabled */
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	/* I/O-port window: base/size from registers, relocated per HBA. */
	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name  = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end   = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Relocate into this HBA's slice of the global port space. */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end   |= lba_num;
}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
/*
 * One-time hardware initialization of an LBA: clear a stuck bus reset,
 * drop SMART mode, set the hard-fail policy, and make sure PCI
 * arbitration is enabled.  Returns 0 (always succeeds).
 */
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* nonzero if PCI bus reset is still asserted */

#if 0
	/* Compiled-out register dump, handy when bringing up new hardware. */
	printk(KERN_DEBUG "LBA %lx  STAT_CTL %Lx  ERROR_CFG %Lx  STATUS %Lx  DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG " ARB mask %Lx  pri %Lx  mode %Lx  mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG " HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif	/* DEBUG_LBA_PAT */

#ifdef CONFIG_64BIT
	/* (no 64-bit-specific init currently performed here) */
#endif

	/* Is the PCI bus reset line still asserted? (bit 0 of STAT_CTL+4) */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	/* Firmware may leave SMART mode on; normal operation wants it off. */
	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Select hard-fail policy: with ENABLE_HARDFAIL, PCI errors crash
	 * the machine immediately; otherwise they are tolerated. */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
#if defined(ENABLE_HARDFAIL)
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#else
	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#endif

	/*
	 * Give devices time to recover if reset was just deasserted.
	 * NOTE(review): pci_post_reset_delay is defined outside the
	 * visible portion of this file.
	 */
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		 * An all-zero arbitration mask means no device (not even
		 * the LBA itself) may master the bus; enable the bus and
		 * the first device slot.
		 */
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	return 0;
}
1454
1455
1456
1457
1458
1459
1460
1461
1462 static unsigned int lba_next_bus = 0;
1463
1464
1465
1466
1467
1468
1469 static int __init
1470 lba_driver_probe(struct parisc_device *dev)
1471 {
1472 struct lba_device *lba_dev;
1473 LIST_HEAD(resources);
1474 struct pci_bus *lba_bus;
1475 struct pci_ops *cfg_ops;
1476 u32 func_class;
1477 void *tmp_obj;
1478 char *version;
1479 void __iomem *addr;
1480 int max;
1481
1482 addr = ioremap(dev->hpa.start, 4096);
1483 if (addr == NULL)
1484 return -ENOMEM;
1485
1486
1487 func_class = READ_REG32(addr + LBA_FCLASS);
1488
1489 if (IS_ELROY(dev)) {
1490 func_class &= 0xf;
1491 switch (func_class) {
1492 case 0: version = "TR1.0"; break;
1493 case 1: version = "TR2.0"; break;
1494 case 2: version = "TR2.1"; break;
1495 case 3: version = "TR2.2"; break;
1496 case 4: version = "TR3.0"; break;
1497 case 5: version = "TR4.0"; break;
1498 default: version = "TR4+";
1499 }
1500
1501 printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
1502 version, func_class & 0xf, (long)dev->hpa.start);
1503
1504 if (func_class < 2) {
1505 printk(KERN_WARNING "Can't support LBA older than "
1506 "TR2.1 - continuing under adversity.\n");
1507 }
1508
1509 #if 0
1510
1511
1512
1513 if (func_class > 4) {
1514 cfg_ops = &mercury_cfg_ops;
1515 } else
1516 #endif
1517 {
1518 cfg_ops = &elroy_cfg_ops;
1519 }
1520
1521 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1522 int major, minor;
1523
1524 func_class &= 0xff;
1525 major = func_class >> 4, minor = func_class & 0xf;
1526
1527
1528
1529
1530 printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
1531 IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
1532 minor, func_class, (long)dev->hpa.start);
1533
1534 cfg_ops = &mercury_cfg_ops;
1535 } else {
1536 printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
1537 (long)dev->hpa.start);
1538 return -ENODEV;
1539 }
1540
1541
1542 tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);
1543
1544
1545
1546
1547
1548 lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
1549 if (!lba_dev) {
1550 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
1551 return(1);
1552 }
1553
1554
1555
1556
1557 lba_dev->hw_rev = func_class;
1558 lba_dev->hba.base_addr = addr;
1559 lba_dev->hba.dev = dev;
1560 lba_dev->iosapic_obj = tmp_obj;
1561 lba_dev->hba.iommu = sba_get_iommu(dev);
1562 parisc_set_drvdata(dev, lba_dev);
1563
1564
1565 pci_bios = &lba_bios_ops;
1566 pcibios_register_hba(&lba_dev->hba);
1567 spin_lock_init(&lba_dev->lba_lock);
1568
1569 if (lba_hw_init(lba_dev))
1570 return(1);
1571
1572
1573
1574 if (is_pdc_pat()) {
1575
1576 pci_port = &lba_pat_port_ops;
1577
1578 lba_pat_resources(dev, lba_dev);
1579 } else {
1580 if (!astro_iop_base) {
1581
1582 astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
1583 pci_port = &lba_astro_port_ops;
1584 }
1585
1586
1587 lba_legacy_resources(dev, lba_dev);
1588 }
1589
1590 if (lba_dev->hba.bus_num.start < lba_next_bus)
1591 lba_dev->hba.bus_num.start = lba_next_bus;
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 if (truncate_pat_collision(&iomem_resource,
1603 &(lba_dev->hba.lmmio_space))) {
1604 printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
1605 (long)lba_dev->hba.lmmio_space.start,
1606 (long)lba_dev->hba.lmmio_space.end);
1607 lba_dev->hba.lmmio_space.flags = 0;
1608 }
1609
1610 pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
1611 HBA_PORT_BASE(lba_dev->hba.hba_num));
1612 if (lba_dev->hba.elmmio_space.flags)
1613 pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
1614 lba_dev->hba.lmmio_space_offset);
1615 if (lba_dev->hba.lmmio_space.flags)
1616 pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
1617 lba_dev->hba.lmmio_space_offset);
1618 if (lba_dev->hba.gmmio_space.flags) {
1619
1620
1621
1622 }
1623
1624 pci_add_resource(&resources, &lba_dev->hba.bus_num);
1625
1626 dev->dev.platform_data = lba_dev;
1627 lba_bus = lba_dev->hba.hba_bus =
1628 pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
1629 cfg_ops, NULL, &resources);
1630 if (!lba_bus) {
1631 pci_free_resource_list(&resources);
1632 return 0;
1633 }
1634
1635 max = pci_scan_child_bus(lba_bus);
1636
1637
1638 if (is_pdc_pat()) {
1639
1640
1641 DBG_PAT("LBA pci_bus_size_bridges()\n");
1642 pci_bus_size_bridges(lba_bus);
1643
1644 DBG_PAT("LBA pci_bus_assign_resources()\n");
1645 pci_bus_assign_resources(lba_bus);
1646
1647 #ifdef DEBUG_LBA_PAT
1648 DBG_PAT("\nLBA PIOP resource tree\n");
1649 lba_dump_res(&lba_dev->hba.io_space, 2);
1650 DBG_PAT("\nLBA LMMIO resource tree\n");
1651 lba_dump_res(&lba_dev->hba.lmmio_space, 2);
1652 #endif
1653 }
1654
1655
1656
1657
1658
1659
1660 if (cfg_ops == &elroy_cfg_ops) {
1661 lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
1662 }
1663
1664 lba_next_bus = max + 1;
1665 pci_bus_add_devices(lba_bus);
1666
1667
1668 return 0;
1669 }
1670
/* Hardware this driver binds to: all known LBA bridge variants.
 * The trailing 0xa is presumably the bridge sversion - confirm against
 * struct parisc_device_id if extending this table. */
static const struct parisc_device_id lba_tbl[] __initconst = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }	/* terminator */
};
1677
/* parisc bus driver: matches devices from lba_tbl and hands each one
 * to lba_driver_probe(). */
static struct parisc_driver lba_driver __refdata = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};
1683
1684
1685
1686
1687
/* lba_init - register the LBA driver with the parisc bus so matching
 * bridges get probed during boot. */
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
1692
1693
1694
1695
1696
1697
1698 void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
1699 {
1700 void __iomem * base_addr = ioremap(lba->hpa.start, 4096);
1701
1702 imask <<= 2;
1703
1704
1705 WARN_ON((ibase & 0x001fffff) != 0);
1706 WARN_ON((imask & 0x001fffff) != 0);
1707
1708 DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
1709 WRITE_REG32( imask, base_addr + LBA_IMASK);
1710 WRITE_REG32( ibase, base_addr + LBA_IBASE);
1711 iounmap(base_addr);
1712 }
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724 static void quirk_diva_ati_card(struct pci_dev *dev)
1725 {
1726 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1727 dev->subsystem_device != 0x1292)
1728 return;
1729
1730 dev_info(&dev->dev, "Hiding Diva built-in ATI card");
1731 dev->device = 0;
1732 }
1733 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
1734 quirk_diva_ati_card);
1735
1736 static void quirk_diva_aux_disable(struct pci_dev *dev)
1737 {
1738 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1739 dev->subsystem_device != 0x1291)
1740 return;
1741
1742 dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
1743 dev->device = 0;
1744 }
1745 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
1746 quirk_diva_aux_disable);
1747
1748 static void quirk_tosca_aux_disable(struct pci_dev *dev)
1749 {
1750 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1751 dev->subsystem_device != 0x104a)
1752 return;
1753
1754 dev_info(&dev->dev, "Hiding Tosca secondary built-in AUX serial device");
1755 dev->device = 0;
1756 }
1757 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA,
1758 quirk_tosca_aux_disable);