0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015 #include <linux/kernel.h>
0016 #include <linux/pci.h>
0017 #include <linux/string.h>
0018 #include <linux/init.h>
0019 #include <linux/delay.h>
0020 #include <linux/export.h>
0021 #include <linux/of_address.h>
0022 #include <linux/of_pci.h>
0023 #include <linux/mm.h>
0024 #include <linux/shmem_fs.h>
0025 #include <linux/list.h>
0026 #include <linux/syscalls.h>
0027 #include <linux/irq.h>
0028 #include <linux/vmalloc.h>
0029 #include <linux/slab.h>
0030 #include <linux/vgaarb.h>
0031 #include <linux/numa.h>
0032 #include <linux/msi.h>
0033 #include <linux/irqdomain.h>
0034
0035 #include <asm/processor.h>
0036 #include <asm/io.h>
0037 #include <asm/pci-bridge.h>
0038 #include <asm/byteorder.h>
0039 #include <asm/machdep.h>
0040 #include <asm/ppc-pci.h>
0041 #include <asm/eeh.h>
0042 #include <asm/setup.h>
0043
0044 #include "../../../drivers/pci/pci.h"
0045
0046
/* Protects hose_list and phb_bitmap below. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* Upper bound on PCI domain ("PHB") numbers we ever hand out. */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses are serialised by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address; exported for drivers poking legacy VGA RAM. */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

/* Default DMA ops installed on every PCI device; set once at boot by the
 * platform via set_pci_dma_ops(). */
static const struct dma_map_ops *pci_dma_ops;
0065
/* Record the DMA ops that pcibios_bus_add_device() installs on each device. */
void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
0070
/*
 * Pick a unique PCI domain number ("PHB id") for the host bridge node @dn.
 *
 * A stable, firmware-derived number is preferred, tried in this order:
 *   1. a PCI domain property via of_get_pci_domain_nr(),
 *   2. the PowerNV "ibm,opal-phbid" property,
 *   3. a "pci" devicetree alias,
 *   4. the second cell of the node's "reg" property.
 * If the derived number is already taken (or nothing usable was found),
 * fall back to the lowest free bit in phb_bitmap.
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u64 prop;

	/* Try fixed PHB numbering first by looking at device-tree
	 * properties, most specific source first. */
	ret = of_get_pci_domain_nr(dn);
	if (ret >= 0) {
		prop = ret;
		ret = 0;
	}
	if (ret)
		ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);

	if (ret) {
		/* Fall back to a devicetree alias such as "pci0". */
		ret = of_alias_get_id(dn, "pci");
		if (ret >= 0) {
			prop = ret;
			ret = 0;
		}
	}
	if (ret) {
		/* Last resort: the second cell of "reg". */
		u32 prop_32;
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	/* Truncate into the bitmap range so it can be reserved below. */
	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	spin_lock(&hose_spinlock);

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		goto out_unlock;

	/* If everything fails then fallback to dynamic PHB numbering. */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

out_unlock:
	spin_unlock(&hose_spinlock);

	return phb_id;
}
0123
/*
 * Allocate and minimally initialise a pci_controller (PHB) for devicetree
 * node @dev: assign it a domain number, link it onto hose_list and record
 * its NUMA node (ppc64).  Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* zalloc_maybe_bootmem() also works before the slab is available. */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;

	phb->global_number = get_phb_number(dev);

	spin_lock(&hose_spinlock);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);

	phb->dn = dev;
	/* Remember whether the slab allocator was up so that
	 * pcibios_free_controller() knows whether kfree() is legal. */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
0153
/*
 * Undo pcibios_alloc_controller(): release the domain number, unlink the
 * controller from hose_list and, if it was slab-allocated, free it.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Boot-time (memblock) allocations are never freed. */
	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
/*
 * Host-bridge release callback: frees the pci_controller stashed in
 * bridge->release_data once the PCI core drops its last reference on the
 * pci_host_bridge.  Intended for use with pci_set_host_bridge_release()
 * so hot-unplug does not free the controller while it is still in use.
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
		bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
0205
0206
0207
0208
0209
0210
0211
0212 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
0213 unsigned long type)
0214 {
0215 struct pci_controller *phb = pci_bus_to_host(bus);
0216
0217 if (phb->controller_ops.window_alignment)
0218 return phb->controller_ops.window_alignment(bus, type);
0219
0220
0221
0222
0223
0224
0225 return 1;
0226 }
0227
0228 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
0229 {
0230 struct pci_controller *hose = pci_bus_to_host(bus);
0231
0232 if (hose->controller_ops.setup_bridge)
0233 hose->controller_ops.setup_bridge(bus, type);
0234 }
0235
0236 void pcibios_reset_secondary_bus(struct pci_dev *dev)
0237 {
0238 struct pci_controller *phb = pci_bus_to_host(dev->bus);
0239
0240 if (phb->controller_ops.reset_secondary_bus) {
0241 phb->controller_ops.reset_secondary_bus(dev);
0242 return;
0243 }
0244
0245 pci_reset_secondary_bus(dev);
0246 }
0247
0248 resource_size_t pcibios_default_alignment(void)
0249 {
0250 if (ppc_md.pcibios_default_alignment)
0251 return ppc_md.pcibios_default_alignment();
0252
0253 return 0;
0254 }
0255
#ifdef CONFIG_PCI_IOV
/* Alignment required for a VF BAR; platforms (e.g. PowerNV with its M64
 * windows) may need more than the plain per-VF size the core would use. */
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}

/* Platform hook run before SR-IOV VFs are enabled; returns 0 on success. */
int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	if (ppc_md.pcibios_sriov_enable)
		return ppc_md.pcibios_sriov_enable(pdev, num_vfs);

	return 0;
}

/* Platform hook run after SR-IOV VFs are disabled; returns 0 on success. */
int pcibios_sriov_disable(struct pci_dev *pdev)
{
	if (ppc_md.pcibios_sriov_disable)
		return ppc_md.pcibios_sriov_disable(pdev);

	return 0;
}

#endif /* CONFIG_PCI_IOV */
0282
/* Size of a PHB's I/O window: tracked explicitly on ppc64, derived from
 * the io_resource on ppc32. */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
0291
0292 int pcibios_vaddr_is_ioport(void __iomem *address)
0293 {
0294 int ret = 0;
0295 struct pci_controller *hose;
0296 resource_size_t size;
0297
0298 spin_lock(&hose_spinlock);
0299 list_for_each_entry(hose, &hose_list, list_node) {
0300 size = pcibios_io_size(hose);
0301 if (address >= hose->io_base_virt &&
0302 address < (hose->io_base_virt + size)) {
0303 ret = 1;
0304 break;
0305 }
0306 }
0307 spin_unlock(&hose_spinlock);
0308 return ret;
0309 }
0310
0311 unsigned long pci_address_to_pio(phys_addr_t address)
0312 {
0313 struct pci_controller *hose;
0314 resource_size_t size;
0315 unsigned long ret = ~0;
0316
0317 spin_lock(&hose_spinlock);
0318 list_for_each_entry(hose, &hose_list, list_node) {
0319 size = pcibios_io_size(hose);
0320 if (address >= hose->io_base_phys &&
0321 address < (hose->io_base_phys + size)) {
0322 unsigned long base =
0323 (unsigned long)hose->io_base_virt - _IO_BASE;
0324 ret = base + (address - hose->io_base_phys);
0325 break;
0326 }
0327 }
0328 spin_unlock(&hose_spinlock);
0329
0330 return ret;
0331 }
0332 EXPORT_SYMBOL_GPL(pci_address_to_pio);
0333
0334
0335
0336
0337 int pci_domain_nr(struct pci_bus *bus)
0338 {
0339 struct pci_controller *hose = pci_bus_to_host(bus);
0340
0341 return hose->global_number;
0342 }
0343 EXPORT_SYMBOL(pci_domain_nr);
0344
0345
0346
0347
0348
0349
0350
0351
0352 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
0353 {
0354 while(node) {
0355 struct pci_controller *hose, *tmp;
0356 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
0357 if (hose->dn == node)
0358 return hose;
0359 node = node->parent;
0360 }
0361 return NULL;
0362 }
0363
0364 struct pci_controller *pci_find_controller_for_domain(int domain_nr)
0365 {
0366 struct pci_controller *hose;
0367
0368 list_for_each_entry(hose, &hose_list, list_node)
0369 if (hose->global_number == domain_nr)
0370 return hose;
0371
0372 return NULL;
0373 }
0374
/*
 * Refcounted record of one mapped INTx virq.  Several PCI functions can
 * share a single interrupt line, so the mapping is only disposed of when
 * the last device using it goes away (see ppc_pci_unmap_irq_line()).
 */
struct pci_intx_virq {
	int virq;			/* Linux irq number of the mapping */
	struct kref kref;		/* one ref per pci_dev using this virq */
	struct list_head list_node;	/* entry in intx_list */
};

/* All live INTx mappings; both list and entries protected by intx_mutex. */
static LIST_HEAD(intx_list);
static DEFINE_MUTEX(intx_mutex);
0383
/* kref release: last user is gone - unlink, dispose of the mapping, free.
 * Callers hold intx_mutex, which makes the list_del() safe. */
static void ppc_pci_intx_release(struct kref *kref)
{
	struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);

	list_del(&vi->list_node);
	irq_dispose_mapping(vi->virq);
	kfree(vi);
}
0392
/*
 * Bus notifier: when a PCI device is removed, drop its reference on the
 * (possibly shared) INTx mapping so the virq is disposed of once the
 * last user disappears.
 */
static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);

	if (action == BUS_NOTIFY_DEL_DEVICE) {
		struct pci_intx_virq *vi;

		mutex_lock(&intx_mutex);
		list_for_each_entry(vi, &intx_list, list_node) {
			if (vi->virq == pdev->irq) {
				kref_put(&vi->kref, ppc_pci_intx_release);
				break;
			}
		}
		mutex_unlock(&intx_mutex);
	}

	return NOTIFY_DONE;
}
0413
static struct notifier_block ppc_pci_unmap_irq_notifier = {
	.notifier_call = ppc_pci_unmap_irq_line,
};

/* Register the device-removal notifier early, before PCI devices appear. */
static int ppc_pci_register_irq_notifier(void)
{
	return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
}
arch_initcall(ppc_pci_register_irq_notifier);
0423
0424
0425
0426
0427
0428
/*
 * Discover the INTx interrupt of @pci_dev and store it in pci_dev->irq.
 *
 * The devicetree interrupt map is tried first; failing that, fall back
 * to the PCI_INTERRUPT_PIN/LINE config registers and map the line number
 * through the default irq domain.  Successful mappings are tracked in
 * intx_list with a refcount so shared lines are only disposed of once.
 *
 * Returns 0 on success, -1 on failure.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;
	struct pci_intx_virq *vi, *vitmp;

	/* Preallocate the tracking record outside of the locked region. */
	vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
	if (!vi)
		return -1;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/* If that fails, fall back to what is in the config space
		 * and map that through the default controller. We also set
		 * the type to level low since that's what PCI interrupts
		 * are. If your platform does differently, then either
		 * provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			goto error_exit;
		if (pin == 0)
			goto error_exit;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			goto error_exit;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		goto error_exit;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	/* Either take another reference on an existing record for this
	 * virq, or insert the record preallocated above. */
	mutex_lock(&intx_mutex);
	list_for_each_entry(vitmp, &intx_list, list_node) {
		if (vitmp->virq == virq) {
			kref_get(&vitmp->kref);
			kfree(vi);
			vi = NULL;
			break;
		}
	}
	if (vi) {
		vi->virq = virq;
		kref_init(&vi->kref);
		list_add_tail(&vi->list_node, &intx_list);
	}
	mutex_unlock(&intx_mutex);

	return 0;
error_exit:
	kfree(vi);
	return -1;
}
0499
0500
0501
0502
0503
0504 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
0505 {
0506 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
0507 resource_size_t ioaddr = pci_resource_start(pdev, bar);
0508
0509 if (!hose)
0510 return -EINVAL;
0511
0512
0513 ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
0514
0515 vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
0516 return 0;
0517 }
0518
0519
0520
0521
0522
0523
/*
 * Decide the page protection used when userspace maps physical page @pfn
 * (e.g. via /dev/mem or sysfs legacy_mem).  RAM keeps the caller's
 * protection; everything else is mapped non-cached, with write-combining
 * allowed when the page lies in a prefetchable PCI memory resource.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search every device's BARs for one covering @offset. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			/* NOTE(review): truncates unsigned long resource
			 * flags to int; IORESOURCE_MEM fits in the low bits,
			 * so this is harmless here. */
			int flags = rp->flags;

			/* Only interested in memory resources. */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* Is @offset inside this (page-aligned) resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* for_each_pci_dev() holds a reference across iterations;
		 * drop it since we broke out of the loop early. */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
0567
0568
/* Read from legacy I/O port space (backs the sysfs "legacy_io" file).
 * Returns the number of bytes read or a negative errno. */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
0607
0608
/* Write to legacy I/O port space (backs the sysfs "legacy_io" file).
 * Returns the number of bytes written or a negative errno. */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2 - hence the
	 * shifts below.
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
0652
0653
/* Map a region of legacy memory or I/O space for the sysfs "legacy_mem"
 * and "legacy_io" files.  Returns 0 on success or a negative errno. */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an
		 * error trying to mmap legacy_mem (instead of just moving
		 * on without legacy memory access) we fake it here by
		 * giving it anonymous memory, effectively behaving just
		 * like /dev/zero.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Validate the requested ports against the PHB's window. */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
0705
/*
 * Translate a resource into the form exposed to userspace (sysfs
 * "resource" files, /proc/bus/pci): I/O resources are reported as
 * bus-relative port numbers, while memory resources keep their CPU
 * physical addresses.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* We pass a CPU physical address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use
	 * that to pass to /dev/mem!
	 *
	 * That means we may have 64-bit values where some apps only
	 * expect 32 (like X itself since it thinks only Sparc has 64-bit
	 * MMIO).
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753
0754
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge
 * device node and setup the resource mapping of the pci controller based
 * on its content.  At most one I/O range and three memory ranges are
 * honoured; additional ones are reported and skipped.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* If we failed translation or got a zero-sized region
		 * (some firmware feeds us nonsensical zero-sized regions
		 * such as power3 which look like some kind of attempt at
		 * exposing the VGA memory hole), just skip it.
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.flags & IORESOURCE_PREFETCH) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
							range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
0856
0857
0858 int pci_proc_domain(struct pci_bus *bus)
0859 {
0860 struct pci_controller *hose = pci_bus_to_host(bus);
0861
0862 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
0863 return 0;
0864 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
0865 return hose->global_number != 0;
0866 return 1;
0867 }
0868
0869 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
0870 {
0871 if (ppc_md.pcibios_root_bridge_prepare)
0872 return ppc_md.pcibios_root_bridge_prepare(bridge);
0873
0874 return 0;
0875 }
0876
0877
0878
0879
/* This header fixup will do the resource fixup for all devices as they
 * are probed, but not for bridge ranges.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	/* VF BARs are managed via the PF; nothing to fix up here. */
	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/* If we're going to re-assign everything, we mark all
		 * resources as unset (and 0-base them). In addition, we
		 * mark BARs with a bus address of 0 as unset as well,
		 * except if PCI_PROBE_ONLY is also set since in that case,
		 * we want to keep the firmware assignment untouched.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
0926
0927
0928
0929
0930
0931
/* This function tries to figure out if a bridge resource has been
 * initialized by the firmware or not.  It doesn't have to be absolutely
 * bullet proof, but like all heuristics, it just tries to cover the most
 * common cases.  Returns 1 when the window looks uninitialized and should
 * be discarded, 0 when it should be kept.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled
		 * on the bridge. If not, we consider it unassigned.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of
		 * the hose windows covers 0 then it's probably been
		 * initialized to use it, otherwise we assume the BAR is
		 * really uninitialized.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we
		 * may as well consider it as unassigned.
		 */
		return 1;
	} else {
		/* If the low 32 bits of the bus address are non-0 then we
		 * consider the window assigned.
		 */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* We consider it unassigned */
		return 1;
	}
}
0996
0997
/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		/* Windows past the first three only exist on transparent
		 * bridges; skip them there. */
		if (i >= 3 && bus->self->transparent)
			continue;

		/* If we're going to reassign everything, we can shrink
		 * the bridge window to zero and reset it.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later.
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}
1033
/* Per-bus setup performed once the bus (and its bridge, if any) exists. */
void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/* Platform specific bus fixups. This is currently only used on
	 * older platforms.
	 */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}
1053
/* Late per-device setup: NUMA node, DMA ops, IRQ line and platform hooks. */
void pcibios_bus_add_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	/* Fixup NUMA node as it may not be setup yet by the generic
	 * code and is needed by the DMA init.
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);

	if (ppc_md.pcibios_bus_add_device)
		ppc_md.pcibios_bus_add_device(dev);
}
1079
1080 int pcibios_device_add(struct pci_dev *dev)
1081 {
1082 struct irq_domain *d;
1083
1084 #ifdef CONFIG_PCI_IOV
1085 if (ppc_md.pcibios_fixup_sriov)
1086 ppc_md.pcibios_fixup_sriov(dev);
1087 #endif
1088
1089 d = dev_get_msi_domain(&dev->bus->dev);
1090 if (d)
1091 dev_set_msi_domain(&dev->dev, d);
1092 return 0;
1093 }
1094
/* No special bus mastering setup is needed on powerpc; the generic
 * pci_set_master() already does everything required. */
void pcibios_set_master(struct pci_dev *dev)
{

}
1099
/* Generic-core callback run for every probed bus. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases. This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus */
	pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1112
1113 static int skip_isa_ioresource_align(struct pci_dev *dev)
1114 {
1115 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1116 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1117 return 1;
1118 return 0;
1119 }
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/*
 * We need to avoid collisions with "mirrored" VGA ports and other strange
 * ISA hardware, so we always want the addresses to be allocated in the
 * 0x000-0x0ff region modulo 0x400.
 *
 * Why? Because some silly external I/O cards only decode the low 10 bits
 * of the I/O address.  The 0x100-0x3ff region of every 1KiB page would
 * therefore alias their registers, so I/O resources are bumped past it.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		/* Round up past the ISA alias region to the next 1KiB page. */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
1150
1151
1152
1153
1154
/**
 * reparent_resources - reparent resource children of parent that res covers
 * @parent: parent resource descriptor
 * @res: resource descriptor desired by caller
 *
 * Returns 0 on success, -1 if there is a conflict that cannot be resolved
 * (a child only partially inside @res) or when @res overlaps none of the
 * children.  On success the covered children are moved under @res, and
 * @res is spliced into @parent's child list in their place.
 */
static int reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;	/* entirely below @res: keep going */
		if (res->end < p->start)
			break;		/* entirely above @res: done */
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* remember first overlap */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicts */
	/* Splice @res in: it adopts the children from *firstpp up to *pp. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
/*
 * Recursively claim the firmware-assigned bridge/PHB windows of @bus and
 * all of its children into the resource tree; windows that cannot be
 * claimed (or were never set) are cleared so they get reassigned later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, we clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			/* PHB windows claim directly against the system. */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- skip it
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/* Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			/* Last resort: let the core shrink the window. */
			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
			i, bus->number);
	clear_resource:
		/* The resource might be figured out when doing
		 * reassignment based on the resources required
		 * by the downstream PCI devices. Here, the resource
		 * is cleared out so that it wouldn't interfere with
		 * the resource reassignment.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1289
/* Claim a single device BAR into the resource tree; on failure mark it
 * unset and zero-base it so it gets reassigned later. */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %pR\n", pr, pr);

		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1310
/*
 * Claim device BARs in two passes: pass 0 takes BARs whose decode is
 * enabled in PCI_COMMAND, pass 1 takes the disabled ones.  Pass 0 also
 * switches off any ROM decode left enabled by the firmware.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			/* NOTE(review): this assignment is a dead store --
			 * it is unconditionally overwritten just below, so
			 * ROM BARs are in fact gated by PCI_COMMAND_MEMORY
			 * like any other MEM BAR.  Kept as-is to preserve
			 * long-standing behaviour. */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1357
/*
 * Reserve the legacy ISA I/O ports (0x000-0xfff) and the VGA memory hole
 * (0xa0000-0xbffff) of root bus @bus so the resource assignment code
 * never allocates devices on top of them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t	offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory: pick the window that covers the VGA hole. */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1413
1414 void __init pcibios_resource_survey(void)
1415 {
1416 struct pci_bus *b;
1417
1418
1419 list_for_each_entry(b, &pci_root_buses, node)
1420 pcibios_allocate_bus_resources(b);
1421 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1422 pcibios_allocate_resources(0);
1423 pcibios_allocate_resources(1);
1424 }
1425
1426
1427
1428
1429
1430 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1431 list_for_each_entry(b, &pci_root_buses, node)
1432 pcibios_reserve_legacy_regions(b);
1433 }
1434
1435
1436
1437
1438 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1439 pr_debug("PCI: Assigning unassigned resources...\n");
1440 pci_assign_unassigned_resources();
1441 }
1442 }
1443
1444
1445
1446
1447
1448
/*
 * Claim the already-programmed resources of every device on @bus and its
 * child buses.  Used on hotplug paths where the firmware (or a previous
 * kernel) has already assigned addresses we want to keep.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			/* On conflict, let the core shrink the bridge
			 * window to something that fits. */
			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1477
1478
1479
1480
1481
1482
1483
1484
/*
 * Finish wiring up a hotplugged bus: allocate and claim its resources,
 * assign anything still unassigned (unless PROBE_ONLY), then register
 * the devices with the PCI core.  The order matters: resources must be
 * settled before pci_bus_add_devices() exposes the devices to drivers.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		/* A bridge below us: assign through it; otherwise whole bus */
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Add new devices to global lists; registers them with the core */
	pci_bus_add_devices(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1504
1505 int pcibios_enable_device(struct pci_dev *dev, int mask)
1506 {
1507 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1508
1509 if (phb->controller_ops.enable_device_hook)
1510 if (!phb->controller_ops.enable_device_hook(dev))
1511 return -EINVAL;
1512
1513 return pci_enable_resources(dev, mask);
1514 }
1515
1516 void pcibios_disable_device(struct pci_dev *dev)
1517 {
1518 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1519
1520 if (phb->controller_ops.disable_device)
1521 phb->controller_ops.disable_device(dev);
1522 }
1523
/*
 * Offset between this host bridge's virtual I/O base and the global
 * _IO_BASE; used to translate between bus I/O addresses and the
 * CPU-side resource ranges.
 */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}
1528
1529 static void pcibios_setup_phb_resources(struct pci_controller *hose,
1530 struct list_head *resources)
1531 {
1532 struct resource *res;
1533 resource_size_t offset;
1534 int i;
1535
1536
1537 res = &hose->io_resource;
1538
1539 if (!res->flags) {
1540 pr_debug("PCI: I/O resource not set for host"
1541 " bridge %pOF (domain %d)\n",
1542 hose->dn, hose->global_number);
1543 } else {
1544 offset = pcibios_io_space_offset(hose);
1545
1546 pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
1547 res, (unsigned long long)offset);
1548 pci_add_resource_offset(resources, res, offset);
1549 }
1550
1551
1552 for (i = 0; i < 3; ++i) {
1553 res = &hose->mem_resources[i];
1554 if (!res->flags)
1555 continue;
1556
1557 offset = hose->mem_offset[i];
1558 pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
1559 res, (unsigned long long)offset);
1560
1561 pci_add_resource_offset(resources, res, offset);
1562 }
1563 }
1564
1565
1566
1567
1568
/*
 * Generator for stub config accessors that always report "device not
 * found"; companion to null_pci_ops below for the no-hose case.
 * NOTE(review): no expansion of this macro is visible in this file —
 * looks like dead scaffolding; confirm before relying on it.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1575
/* Stub bus read op: every config read fails with DEVICE_NOT_FOUND */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1582
/* Stub bus write op: every config write fails with DEVICE_NOT_FOUND */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1589
/* Ops used by fake_pci_bus() when no hose is available */
static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};
1595
1596
1597
1598
1599
1600 static struct pci_bus *
1601 fake_pci_bus(struct pci_controller *hose, int busnr)
1602 {
1603 static struct pci_bus bus;
1604
1605 if (hose == NULL) {
1606 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1607 }
1608 bus.number = busnr;
1609 bus.sysdata = hose;
1610 bus.ops = hose? hose->ops: &null_pci_ops;
1611 return &bus;
1612 }
1613
/*
 * Generate early_{read,write}_config_{byte,word,dword}() helpers that
 * perform config-space accesses via a fake bus, for use before the PCI
 * core has scanned real buses.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1628
/*
 * Early (pre-scan) capability lookup: search @devfn's capability list
 * for @cap through a fake bus built on @hose.
 */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	struct pci_bus *pbus = fake_pci_bus(hose, bus);

	return pci_bus_find_capability(pbus, devfn, cap);
}
1634
1635 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1636 {
1637 struct pci_controller *hose = bus->sysdata;
1638
1639 return of_node_get(hose->dn);
1640 }
1641
1642
1643
1644
1645
/*
 * Scan one PCI host bridge: set up its I/O space and windows, create the
 * root bus, probe children (via device tree or normal config scanning),
 * run the platform PHB fixup, and configure PCIe bus settings.  The
 * ordering is load-bearing: resources must be registered before the root
 * bus is created, and the fixup runs before PCIe settings are applied.
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %pOF\n", node);

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB IO and memory windows into the resource list */
	pcibios_setup_phb_resources(hose, &resources);

	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform scan; default is a normal config scan */
	mode = PCI_PROBE_NORMAL;
	if (node && hose->controller_ops.probe_mode)
		mode = hose->controller_ops.probe_mode(bus);
	pr_debug("    probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		/* Widen the bus range for the scan, then trim it back */
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/* Platform-specific PHB fixup after the scan */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	/* NOTE(review): bus is guaranteed non-NULL here (checked above),
	 * so the "bus &&" test is redundant but harmless. */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1705
/*
 * Header fixup for Motorola/Freescale parts: when such a chip appears
 * directly on a root bus as a PowerPC-processor or other-bridge class
 * device with a normal header and programming interface 0, zero out all
 * of its BARs so the core never tries to allocate or move them.
 * NOTE(review): presumably this hides the host-controller's own config
 * header when the part is configured as a host — confirm against the
 * platform docs.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* Low nibble of the class dword is the programming interface */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		/* Clear every resource so the core ignores this device */
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
/* Apply at header-probe time to all Motorola and Freescale devices */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1726
1727
1728 static int __init discover_phbs(void)
1729 {
1730 if (ppc_md.discover_phbs)
1731 ppc_md.discover_phbs();
1732
1733 return 0;
1734 }
1735 core_initcall(discover_phbs);