// SPDX-License-Identifier: GPL-2.0
/* Various workarounds for chipset bugs.
   This code runs very early and can't use the regular PCI subsystem.
   The entries are keyed to PCI bridges, which usually identify chipsets
   uniquely.
   This is only for whole classes of chipsets with specific problems which
   need early invasive action (e.g. before the timers are initialized).
   Most PCI device specific workarounds can be done later and should be
   in the standard PCI quirks.
   Mainboard specific bugs should be handled by DMI entries.
   CPU specific bugs belong in setup.c. */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/platform_data/x86/apple.h>
#include <drm/i915_drm.h>
#include <drm/i915_pciids.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
#include <asm/early_ioremap.h>

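/*
 * AMD K8 northbridge: when the firmware has enabled extended APIC IDs
 * on the HyperTransport bus, interrupt broadcast must also be enabled
 * so that all CPUs on the bus can be reached.  Fix up the config word
 * at offset 0x68 if the BIOS left the broadcast bit clear.
 */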
static void __init fix_hypertransport_config(int num, int slot, int func)
{
    u32 htcfg;
    /*
     * We found a HyperTransport bus.  Make sure we broadcast
     * interrupts to all CPUs on the HT bus if extended APIC IDs
     * are in use.
     */
    htcfg = read_pci_config(num, slot, func, 0x68);
    if (htcfg & (1 << 18)) {
        printk(KERN_INFO "Detected use of extended apic ids "
                 "on hypertransport bus\n");
        if ((htcfg & (1 << 17)) == 0) {
            printk(KERN_INFO "Enabling hypertransport extended "
                     "apic interrupt broadcast\n");
            printk(KERN_INFO "Note this is a BIOS bug, "
                     "please contact your hw vendor\n");
            htcfg |= (1 << 17);
            write_pci_config(num, slot, func, 0x68, htcfg);
        }
    }
}

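/*
 * VIA chipsets do not work with the AMD GART IOMMU.  If the machine
 * would otherwise need it (RAM above 4GB or iommu=force), disable the
 * aperture unless the user explicitly allowed it with iommu=allowed.
 */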
static void __init via_bugs(int num, int slot, int func)
{
#ifdef CONFIG_GART_IOMMU
    if ((max_pfn > MAX_DMA32_PFN || force_iommu) &&
        !gart_iommu_aperture_allowed) {
        printk(KERN_INFO
               "Looks like a VIA chipset. Disabling IOMMU."
               " Override with iommu=allowed\n");
        gart_iommu_aperture_disabled = 1;
    }
#endif
}

#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC

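/*
 * Dummy acpi_table_parse() handler: all we care about is whether an
 * HPET table exists at all, so accept whatever we are handed.
 */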
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
    return 0;
}
#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */

static void __init nvidia_bugs(int num, int slot, int func)
{
#ifdef CONFIG_ACPI
#ifdef CONFIG_X86_IO_APIC
    /*
     * Only applies to Nvidia root ports (bus 0) and not to
     * Nvidia graphics cards with PCI ports on secondary buses.
     */
    if (num)
        return;

    /*
     * All timer overrides on Nvidia are wrong unless HPET is
     * enabled.  Unfortunately that's not true on many Asus boards.
     * We don't know yet how to detect this automatically, but at
     * least allow a command line override.
     */
    if (acpi_use_timer_override)
        return;

    if (acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check)) {
        acpi_skip_timer_override = 1;
        printk(KERN_INFO "Nvidia board "
               "detected. Ignoring ACPI "
               "timer override.\n");
        printk(KERN_INFO "If you have timer trouble, "
            "try acpi_use_timer_override\n");
    }
#endif
#endif
    /* RED-PEN skip them on mptables too? */
}

#if defined(CONFIG_ACPI) && defined(CONFIG_X86_IO_APIC)
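/*
 * Read the revision of the ATI IXP4x0 (SB4x0) SMBus device.  This
 * clears bit 5 of config register 0xac and sets bit 8 of register 0x70
 * before reading the revision byte at offset 0x08; presumably this is
 * what exposes the real silicon revision.
 */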
static u32 __init ati_ixp4x0_rev(int num, int slot, int func)
{
    u32 d;
    u8  b;

    b = read_pci_config_byte(num, slot, func, 0xac);
    b &= ~(1<<5);
    write_pci_config_byte(num, slot, func, 0xac, b);

    d = read_pci_config(num, slot, func, 0x70);
    d |= 1<<8;
    write_pci_config(num, slot, func, 0x70, d);

    d = read_pci_config(num, slot, func, 0x8);
    d &= 0xff;
    return d;
}

static void __init ati_bugs(int num, int slot, int func)
{
    u32 d;
    u8  b;

    if (acpi_use_timer_override)
        return;

    d = ati_ixp4x0_rev(num, slot, func);
    if (d < 0x82)
        acpi_skip_timer_override = 1;
    else {
        /* check for IRQ0 interrupt swap */
        outb(0x72, 0xcd6);
        b = inb(0xcd7);
        if (!(b & 0x2))
            acpi_skip_timer_override = 1;
    }

    if (acpi_skip_timer_override) {
        printk(KERN_INFO "SB4X0 revision 0x%x\n", d);
        printk(KERN_INFO "Ignoring ACPI timer override.\n");
        printk(KERN_INFO "If you have timer trouble, "
               "try acpi_use_timer_override\n");
    }
}

static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
    u32 d;

    d = read_pci_config(num, slot, func, 0x8);
    d &= 0xff;

    return d;
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
    u32 d, rev;

    rev = ati_sbx00_rev(num, slot, func);
    if (rev >= 0x40)
        acpi_fix_pin2_polarity = 1;

    /*
     * SB600: revisions 0x11, 0x12, 0x13, 0x14, ...
     * SB700: revisions 0x39, 0x3a, ...
     * SB800: revisions 0x40, 0x41, ...
     */
    if (rev >= 0x39)
        return;

    if (acpi_use_timer_override)
        return;

    /* check for IRQ0 interrupt swap */
    d = read_pci_config(num, slot, func, 0x64);
    if (!(d & (1<<14)))
        acpi_skip_timer_override = 1;

    if (acpi_skip_timer_override) {
        printk(KERN_INFO "SB600 revision 0x%x\n", rev);
        printk(KERN_INFO "Ignoring ACPI timer override.\n");
        printk(KERN_INFO "If you have timer trouble, "
               "try acpi_use_timer_override\n");
    }
}
#else
static void __init ati_bugs(int num, int slot, int func)
{
}

static void __init ati_bugs_contd(int num, int slot, int func)
{
}
#endif

static void __init intel_remapping_check(int num, int slot, int func)
{
    u8 revision;
    u16 device;

    device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
    revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);

    /*
     * Revision <= 0x13 of all device IDs that trigger this quirk
     * have a problem draining interrupts when irq remapping is
     * enabled, and should be flagged as broken.  Additionally,
     * revision 0x22 of device id 0x3405 has this problem.
     */
    if (revision <= 0x13)
        set_irq_remapping_broken();
    else if (device == 0x3405 && revision == 0x22)
        set_irq_remapping_broken();
}

/*
 * Systems with Intel graphics controllers set aside memory exclusively
 * for gfx driver use.  This memory is not marked in the E820 as reserved
 * or as RAM, and so is subject to overlap from E820 manipulation later
 * in the boot process.  On some systems, MMIO space is allocated on top,
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
 */

#define KB(x)   ((x) * 1024UL)
#define MB(x)   (KB (KB (x)))

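/*
 * TSEG is the SMM memory region the BIOS carves out just below the top
 * of memory.  On these old GMCHs the graphics stolen memory sits below
 * TSEG, so its size has to be known to locate the stolen base.
 */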
static resource_size_t __init i830_tseg_size(void)
{
    u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

    if (!(esmramc & TSEG_ENABLE))
        return 0;

    if (esmramc & I830_TSEG_SIZE_1M)
        return MB(1);
    else
        return KB(512);
}

static resource_size_t __init i845_tseg_size(void)
{
    u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
    u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;

    if (!(esmramc & TSEG_ENABLE))
        return 0;

    switch (tseg_size) {
    case I845_TSEG_SIZE_512K:   return KB(512);
    case I845_TSEG_SIZE_1M:     return MB(1);
    default:
        WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
    }
    return 0;
}

static resource_size_t __init i85x_tseg_size(void)
{
    u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

    if (!(esmramc & TSEG_ENABLE))
        return 0;

    return MB(1);
}

static resource_size_t __init i830_mem_size(void)
{
    return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
}

static resource_size_t __init i85x_mem_size(void)
{
    return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
}

/*
 * On 830/845/85x the stolen memory base isn't available in any
 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
 */
static resource_size_t __init i830_stolen_base(int num, int slot, int func,
                           resource_size_t stolen_size)
{
    return i830_mem_size() - i830_tseg_size() - stolen_size;
}

static resource_size_t __init i845_stolen_base(int num, int slot, int func,
                           resource_size_t stolen_size)
{
    return i830_mem_size() - i845_tseg_size() - stolen_size;
}

static resource_size_t __init i85x_stolen_base(int num, int slot, int func,
                           resource_size_t stolen_size)
{
    return i85x_mem_size() - i85x_tseg_size() - stolen_size;
}

static resource_size_t __init i865_stolen_base(int num, int slot, int func,
                           resource_size_t stolen_size)
{
    u16 toud = 0;

    toud = read_pci_config_16(0, 0, 0, I865_TOUD);

    return toud * KB(64) + i845_tseg_size();
}

static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
                           resource_size_t stolen_size)
{
    u32 bsm;

    /* Almost universally we can find the Graphics Base of Stolen Memory
     * at register BSM (0x5c) in the igfx configuration space. On a few
     * (desktop) machines this is also mirrored in the bridge device at
     * different locations, or in the MCHBAR.
     */
    bsm = read_pci_config(num, slot, func, INTEL_BSM);

    return bsm & INTEL_BSM_MASK;
}

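/*
 * Gen11 reports the base of stolen memory in a 64-bit register pair;
 * combine the two dwords (BSM_DW0/BSM_DW1) into one address.
 */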
static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
                        resource_size_t stolen_size)
{
    u64 bsm;

    bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
    bsm &= INTEL_BSM_MASK;
    bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;

    return bsm;
}

static resource_size_t __init i830_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
    gms = gmch_ctrl & I830_GMCH_GMS_MASK;

    switch (gms) {
    case I830_GMCH_GMS_STOLEN_512:  return KB(512);
    case I830_GMCH_GMS_STOLEN_1024: return MB(1);
    case I830_GMCH_GMS_STOLEN_8192: return MB(8);
    /* local memory isn't part of the normal address space */
    case I830_GMCH_GMS_LOCAL:   return 0;
    default:
        WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
    }

    return 0;
}

static resource_size_t __init gen3_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
    gms = gmch_ctrl & I855_GMCH_GMS_MASK;

    switch (gms) {
    case I855_GMCH_GMS_STOLEN_1M:   return MB(1);
    case I855_GMCH_GMS_STOLEN_4M:   return MB(4);
    case I855_GMCH_GMS_STOLEN_8M:   return MB(8);
    case I855_GMCH_GMS_STOLEN_16M:  return MB(16);
    case I855_GMCH_GMS_STOLEN_32M:  return MB(32);
    case I915_GMCH_GMS_STOLEN_48M:  return MB(48);
    case I915_GMCH_GMS_STOLEN_64M:  return MB(64);
    case G33_GMCH_GMS_STOLEN_128M:  return MB(128);
    case G33_GMCH_GMS_STOLEN_256M:  return MB(256);
    case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
    case INTEL_GMCH_GMS_STOLEN_160M: return MB(160);
    case INTEL_GMCH_GMS_STOLEN_224M: return MB(224);
    case INTEL_GMCH_GMS_STOLEN_352M: return MB(352);
    default:
        WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
    }

    return 0;
}

static resource_size_t __init gen6_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
    gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

    return gms * MB(32);
}

static resource_size_t __init gen8_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
    gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

    return gms * MB(32);
}

static resource_size_t __init chv_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
    gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

    /*
     * 0x0  to 0x10: 32MB increments starting at 0MB
     * 0x11 to 0x16: 4MB increments starting at 8MB
     * 0x17 to 0x1d: 4MB increments starting at 36MB
     */
    if (gms < 0x11)
        return gms * MB(32);
    else if (gms < 0x17)
        return (gms - 0x11) * MB(4) + MB(8);
    else
        return (gms - 0x17) * MB(4) + MB(36);
}

static resource_size_t __init gen9_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
    gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

    /* 0x0  to 0xef: 32MB increments starting at 0MB */
    /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
    if (gms < 0xf0)
        return gms * MB(32);
    else
        return (gms - 0xf0) * MB(4) + MB(4);
}

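/*
 * Per-generation hooks: how to read the size of the graphics stolen
 * memory and where it starts.
 */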
struct intel_early_ops {
    resource_size_t (*stolen_size)(int num, int slot, int func);
    resource_size_t (*stolen_base)(int num, int slot, int func,
                       resource_size_t size);
};

static const struct intel_early_ops i830_early_ops __initconst = {
    .stolen_base = i830_stolen_base,
    .stolen_size = i830_stolen_size,
};

static const struct intel_early_ops i845_early_ops __initconst = {
    .stolen_base = i845_stolen_base,
    .stolen_size = i830_stolen_size,
};

static const struct intel_early_ops i85x_early_ops __initconst = {
    .stolen_base = i85x_stolen_base,
    .stolen_size = gen3_stolen_size,
};

static const struct intel_early_ops i865_early_ops __initconst = {
    .stolen_base = i865_stolen_base,
    .stolen_size = gen3_stolen_size,
};

static const struct intel_early_ops gen3_early_ops __initconst = {
    .stolen_base = gen3_stolen_base,
    .stolen_size = gen3_stolen_size,
};

static const struct intel_early_ops gen6_early_ops __initconst = {
    .stolen_base = gen3_stolen_base,
    .stolen_size = gen6_stolen_size,
};

static const struct intel_early_ops gen8_early_ops __initconst = {
    .stolen_base = gen3_stolen_base,
    .stolen_size = gen8_stolen_size,
};

static const struct intel_early_ops gen9_early_ops __initconst = {
    .stolen_base = gen3_stolen_base,
    .stolen_size = gen9_stolen_size,
};

static const struct intel_early_ops chv_early_ops __initconst = {
    .stolen_base = gen3_stolen_base,
    .stolen_size = chv_stolen_size,
};

static const struct intel_early_ops gen11_early_ops __initconst = {
    .stolen_base = gen11_stolen_base,
    .stolen_size = gen9_stolen_size,
};

/* Intel integrated GPUs for which we need to reserve "stolen memory" */
static const struct pci_device_id intel_early_ids[] __initconst = {
    INTEL_I830_IDS(&i830_early_ops),
    INTEL_I845G_IDS(&i845_early_ops),
    INTEL_I85X_IDS(&i85x_early_ops),
    INTEL_I865G_IDS(&i865_early_ops),
    INTEL_I915G_IDS(&gen3_early_ops),
    INTEL_I915GM_IDS(&gen3_early_ops),
    INTEL_I945G_IDS(&gen3_early_ops),
    INTEL_I945GM_IDS(&gen3_early_ops),
    INTEL_VLV_IDS(&gen6_early_ops),
    INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
    INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
    INTEL_I965G_IDS(&gen3_early_ops),
    INTEL_G33_IDS(&gen3_early_ops),
    INTEL_I965GM_IDS(&gen3_early_ops),
    INTEL_GM45_IDS(&gen3_early_ops),
    INTEL_G45_IDS(&gen3_early_ops),
    INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
    INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
    INTEL_SNB_D_IDS(&gen6_early_ops),
    INTEL_SNB_M_IDS(&gen6_early_ops),
    INTEL_IVB_M_IDS(&gen6_early_ops),
    INTEL_IVB_D_IDS(&gen6_early_ops),
    INTEL_HSW_IDS(&gen6_early_ops),
    INTEL_BDW_IDS(&gen8_early_ops),
    INTEL_CHV_IDS(&chv_early_ops),
    INTEL_SKL_IDS(&gen9_early_ops),
    INTEL_BXT_IDS(&gen9_early_ops),
    INTEL_KBL_IDS(&gen9_early_ops),
    INTEL_CFL_IDS(&gen9_early_ops),
    INTEL_GLK_IDS(&gen9_early_ops),
    INTEL_CNL_IDS(&gen9_early_ops),
    INTEL_ICL_11_IDS(&gen11_early_ops),
    INTEL_EHL_IDS(&gen11_early_ops),
    INTEL_JSL_IDS(&gen11_early_ops),
    INTEL_TGL_12_IDS(&gen11_early_ops),
    INTEL_RKL_IDS(&gen11_early_ops),
    INTEL_ADLS_IDS(&gen11_early_ops),
    INTEL_ADLP_IDS(&gen11_early_ops),
    INTEL_ADLN_IDS(&gen11_early_ops),
    INTEL_RPLS_IDS(&gen11_early_ops),
    INTEL_RPLP_IDS(&gen11_early_ops),
};

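/*
 * The reserved range is published in this resource so that the i915
 * driver can look it up later; it stays empty until a quirk fills it in.
 */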
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
EXPORT_SYMBOL(intel_graphics_stolen_res);

static void __init
intel_graphics_stolen(int num, int slot, int func,
              const struct intel_early_ops *early_ops)
{
    resource_size_t base, size;
    resource_size_t end;

    size = early_ops->stolen_size(num, slot, func);
    base = early_ops->stolen_base(num, slot, func, size);

    if (!size || !base)
        return;

    end = base + size - 1;

    intel_graphics_stolen_res.start = base;
    intel_graphics_stolen_res.end = end;

    printk(KERN_INFO "Reserving Intel graphics memory at %pR\n",
           &intel_graphics_stolen_res);

    /* Mark this space as reserved */
    e820__range_add(base, size, E820_TYPE_RESERVED);
    e820__update_table(e820_table);
}

static void __init intel_graphics_quirks(int num, int slot, int func)
{
    const struct intel_early_ops *early_ops;
    u16 device;
    int i;

    /*
     * Reserve "stolen memory" for an integrated GPU.  If we've already
     * found one, there's nothing to do for other (discrete) GPUs.
     */
    if (resource_size(&intel_graphics_stolen_res))
        return;

    device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

    for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
        kernel_ulong_t driver_data = intel_early_ids[i].driver_data;

        if (intel_early_ids[i].device != device)
            continue;

        early_ops = (typeof(early_ops))driver_data;

        intel_graphics_stolen(num, slot, func, early_ops);

        return;
    }
}

static void __init force_disable_hpet(int num, int slot, int func)
{
#ifdef CONFIG_HPET_TIMER
    boot_hpet_disable = true;
    pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
#endif
}

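/*
 * Minimal open-coded equivalents of bcma_aread32()/bcma_awrite32():
 * the agent/wrapper registers of the mapped core sit one
 * BCMA_CORE_SIZE window above the core registers in the BAR.
 */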
#define BCM4331_MMIO_SIZE   16384
#define BCM4331_PM_CAP      0x40
#define bcma_aread32(reg)   ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)

static void __init apple_airport_reset(int bus, int slot, int func)
{
    void __iomem *mmio;
    u16 pmcsr;
    u64 addr;
    int i;

    if (!x86_apple_machine)
        return;

    /* Card may have been put into PCI_D3hot by grub quirk */
    pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);

    if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
        mdelay(10);

        pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
        if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
            pr_err("pci 0000:%02x:%02x.%d: Cannot power up Apple AirPort card\n",
                   bus, slot, func);
            return;
        }
    }

    addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
    addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
    addr &= PCI_BASE_ADDRESS_MEM_MASK;

    mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
    if (!mmio) {
        pr_err("pci 0000:%02x:%02x.%d: Cannot iomap Apple AirPort card\n",
               bus, slot, func);
        return;
    }

    pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");

    for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
        udelay(10);

    bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
    bcma_aread32(BCMA_RESET_CTL);
    udelay(1);

    bcma_awrite32(BCMA_RESET_CTL, 0);
    bcma_aread32(BCMA_RESET_CTL);
    udelay(10);

    early_iounmap(mmio, BCM4331_MMIO_SIZE);
}

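/*
 * Early quirk table machinery.  Entries match on vendor/device ID and
 * on class (masked with class_mask); QFLAG_APPLY_ONCE limits a handler
 * to the first matching device.
 */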
#define QFLAG_APPLY_ONCE    0x1
#define QFLAG_APPLIED       0x2
#define QFLAG_DONE      (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
struct chipset {
    u32 vendor;
    u32 device;
    u32 class;
    u32 class_mask;
    u32 flags;
    void (*f)(int num, int slot, int func);
};

static struct chipset early_qrk[] __initdata = {
    { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
      PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
    { PCI_VENDOR_ID_VIA, PCI_ANY_ID,
      PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, via_bugs },
    { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
      PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, fix_hypertransport_config },
    { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
      PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs },
    { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
      PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
    { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
    { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
    { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
    { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
      0, intel_graphics_quirks },
    /*
     * HPET on the current version of the Baytrail platform has accuracy
     * problems: it will halt in deep idle state - so we disable it.
     *
     * More details can be found in section 18.10.1.3 of the datasheet:
     *
     *    http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/atom-z8000-datasheet-vol-1.pdf
     */
    { PCI_VENDOR_ID_INTEL, 0x0f00,
      PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet },
    { PCI_VENDOR_ID_BROADCOM, 0x4331,
      PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset },
    {}
};

static void __init early_pci_scan_bus(int bus);

/**
 * check_dev_quirk - apply early quirks to a given PCI device
 * @num: bus number
 * @slot: slot number
 * @func: PCI function
 *
 * Check the vendor & device ID against the early quirks table.
 *
 * If the device is single function, let early_pci_scan_bus() know so we don't
 * poke at this device again.
 */
static int __init check_dev_quirk(int num, int slot, int func)
{
    u16 class;
    u16 vendor;
    u16 device;
    u8 type;
    u8 sec;
    int i;

    class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);

    if (class == 0xffff)
        return -1; /* no class, treat as single function */

    vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);

    device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);

    for (i = 0; early_qrk[i].f != NULL; i++) {
        if (((early_qrk[i].vendor == PCI_ANY_ID) ||
            (early_qrk[i].vendor == vendor)) &&
            ((early_qrk[i].device == PCI_ANY_ID) ||
            (early_qrk[i].device == device)) &&
            (!((early_qrk[i].class ^ class) &
                early_qrk[i].class_mask))) {
                if ((early_qrk[i].flags &
                     QFLAG_DONE) != QFLAG_DONE)
                    early_qrk[i].f(num, slot, func);
                early_qrk[i].flags |= QFLAG_APPLIED;
            }
    }

    type = read_pci_config_byte(num, slot, func,
                    PCI_HEADER_TYPE);

    if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
        sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
        if (sec > num)
            early_pci_scan_bus(sec);
    }

    if (!(type & 0x80))
        return -1;

    return 0;
}

static void __init early_pci_scan_bus(int bus)
{
    int slot, func;

    /* Poor man's PCI discovery */
    for (slot = 0; slot < 32; slot++)
        for (func = 0; func < 8; func++) {
            /* Only probe function 0 on single fn devices */
            if (check_dev_quirk(bus, slot, func))
                break;
        }
}

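/*
 * Called from setup_arch() before the regular PCI subsystem (and the
 * timers) are initialized; walks the buses reachable from bus 0 with
 * the early config accessors and applies the quirk table above.
 */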
void __init early_quirks(void)
{
    if (!early_pci_allowed())
        return;

    early_pci_scan_bus(0);
}