// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>
#include <asm/setup.h>
#include <asm/mce.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
    u8 config;
    u16 word;

    /* BIOS may enable hardware IRQ balancing for
     * E7520/E7320/E7525 (revision ID 0x9 and below)
     * based platforms.
     * Disable SW irqbalance/affinity on those platforms.
     */
    if (dev->revision > 0x9)
        return;

    /* enable access to config space */
    pci_read_config_byte(dev, 0xf4, &config);
    pci_write_config_byte(dev, 0xf4, config|0x2);

    /*
     * read xTPR register.  We may not have a pci_dev for device 8
     * because it might be hidden until the above write.
     */
    pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

    if (!(word & (1 << 13))) {
        dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
            "disabling irq balancing and affinity\n");
        noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
        no_irq_affinity = 1;
#endif
    }

    /* put back the original value for config space */
    if (!(config & 0x2))
        pci_write_config_byte(dev, 0xf4, config);
}
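/* Register with the PCI fixup machinery so the quirk runs for each matching vendor/device ID. */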
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
            quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
            quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
            quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
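/* Physical address at which the quirks below force-enabled an HPET; 0 if none. */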
unsigned long force_hpet_address;

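/* Records which chipset-specific handler force_hpet_resume() must run. */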
static enum {
    NONE_FORCE_HPET_RESUME,
    OLD_ICH_FORCE_HPET_RESUME,
    ICH_FORCE_HPET_RESUME,
    VT8237_FORCE_HPET_RESUME,
    NVIDIA_FORCE_HPET_RESUME,
    ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
    u32 val;

    if (!force_hpet_address)
        return;

    BUG_ON(rcba_base == NULL);

    /* read the Function Disable register, dword mode only */
    val = readl(rcba_base + 0x3404);
    if (!(val & 0x80)) {
        /* HPET disabled in HPTC. Trying to enable */
        writel(val | 0x80, rcba_base + 0x3404);
    }

    val = readl(rcba_base + 0x3404);
    if (!(val & 0x80))
        BUG();
    else
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
    u32 val;
    u32 rcba;
    int err = 0;

    if (hpet_address || force_hpet_address)
        return;

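    /* 0xF0 in the LPC bridge's config space is the Root Complex Base Address (RCBA) register */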
    pci_read_config_dword(dev, 0xF0, &rcba);
    rcba &= 0xFFFFC000;
    if (rcba == 0) {
        dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
            "cannot force enable HPET\n");
        return;
    }

    /* use bits 31:14, 16 kB aligned */
    rcba_base = ioremap(rcba, 0x4000);
    if (rcba_base == NULL) {
        dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
            "cannot force enable HPET\n");
        return;
    }

    /* read the Function Disable register, dword mode only */
    val = readl(rcba_base + 0x3404);

    if (val & 0x80) {
        /* HPET is enabled in HPTC. Just not reported by BIOS */
        val = val & 0x3;
        force_hpet_address = 0xFED00000 | (val << 12);
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
            "0x%lx\n", force_hpet_address);
        iounmap(rcba_base);
        return;
    }

    /* HPET disabled in HPTC. Trying to enable */
    writel(val | 0x80, rcba_base + 0x3404);

    val = readl(rcba_base + 0x3404);
    if (!(val & 0x80)) {
        err = 1;
    } else {
        val = val & 0x3;
        force_hpet_address = 0xFED00000 | (val << 12);
    }

    if (err) {
        force_hpet_address = 0;
        iounmap(rcba_base);
        dev_printk(KERN_DEBUG, &dev->dev,
            "Failed to force enable HPET\n");
    } else {
        force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
            "0x%lx\n", force_hpet_address);
    }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
             ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,   /* ICH10 */
             ich_force_enable_hpet);

static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
    printk(KERN_INFO "HPET not enabled in BIOS. "
           "You might try hpet=force boot option\n");
}

static void old_ich_force_hpet_resume(void)
{
    u32 val;
    u32 gen_cntl;

    if (!force_hpet_address || !cached_dev)
        return;

    pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
    gen_cntl &= (~(0x7 << 15));
    gen_cntl |= (0x4 << 15);

    pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
    pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
    val = gen_cntl >> 15;
    val &= 0x7;
    if (val == 0x4)
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
    else
        BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
    u32 val;
    u32 gen_cntl;

    if (hpet_address || force_hpet_address)
        return;

    pci_read_config_dword(dev, 0xD0, &gen_cntl);
    /*
     * Bit 17 is HPET enable bit.
     * Bit 16:15 control the HPET base address.
     */
    val = gen_cntl >> 15;
    val &= 0x7;
    if (val & 0x4) {
        val &= 0x3;
        force_hpet_address = 0xFED00000 | (val << 12);
        dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
            force_hpet_address);
        return;
    }

    /*
     * HPET is disabled. Try enabling it at 0xFED00000 and check
     * whether it sticks.
     */
    gen_cntl &= (~(0x7 << 15));
    gen_cntl |= (0x4 << 15);
    pci_write_config_dword(dev, 0xD0, gen_cntl);

    pci_read_config_dword(dev, 0xD0, &gen_cntl);

    val = gen_cntl >> 15;
    val &= 0x7;
    if (val & 0x4) {
        /* HPET is enabled in HPTC. Just not reported by BIOS */
        val &= 0x3;
        force_hpet_address = 0xFED00000 | (val << 12);
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
            "0x%lx\n", force_hpet_address);
        cached_dev = dev;
        force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
        return;
    }

    dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Only force-enable the HPET when the
 * user has explicitly asked for it (hpet_force_user).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
    if (hpet_force_user)
        old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
             old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
             old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
             old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
             old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
             old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
             old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
             old_ich_force_enable_hpet);


static void vt8237_force_hpet_resume(void)
{
    u32 val;

    if (!force_hpet_address || !cached_dev)
        return;

    val = 0xfed00000 | 0x80;
    pci_write_config_dword(cached_dev, 0x68, val);

    pci_read_config_dword(cached_dev, 0x68, &val);
    if (val & 0x80)
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
    else
        BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
    u32 val;

    if (hpet_address || force_hpet_address)
        return;

    if (!hpet_force_user) {
        hpet_print_force_info();
        return;
    }

    pci_read_config_dword(dev, 0x68, &val);
    /*
     * Bit 7 is HPET enable bit.
     * Bit 31:10 is HPET base address (contrary to what datasheet claims)
     */
    if (val & 0x80) {
        force_hpet_address = (val & ~0x3ff);
        dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
            force_hpet_address);
        return;
    }

    /*
     * HPET is disabled. Try enabling it at 0xFED00000 and check
     * whether it sticks.
     */
    val = 0xfed00000 | 0x80;
    pci_write_config_dword(dev, 0x68, val);

    pci_read_config_dword(dev, 0x68, &val);
    if (val & 0x80) {
        force_hpet_address = (val & ~0x3ff);
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
            "0x%lx\n", force_hpet_address);
        cached_dev = dev;
        force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
        return;
    }

    dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
             vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
             vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
             vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
    pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
    printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
    int err = 0;
    u32 d = 0;
    u8  b = 0;

    err = pci_read_config_byte(dev, 0xac, &b);
    b &= ~(1<<5);
    err |= pci_write_config_byte(dev, 0xac, b);
    err |= pci_read_config_dword(dev, 0x70, &d);
    d |= 1<<8;
    err |= pci_write_config_dword(dev, 0x70, d);
    err |= pci_read_config_dword(dev, 0x8, &d);
    d &= 0xff;
    dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

    WARN_ON_ONCE(err);

    return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
    u32 d, val;
    u8  b;

    if (hpet_address || force_hpet_address)
        return;

    if (!hpet_force_user) {
        hpet_print_force_info();
        return;
    }

    d = ati_ixp4x0_rev(dev);
    if (d < 0x82)
        return;

    /* base address */
    pci_write_config_dword(dev, 0x14, 0xfed00000);
    pci_read_config_dword(dev, 0x14, &val);

    /* enable interrupt */
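    /* PM registers are reached via the index/data pair at I/O ports 0xcd6/0xcd7 */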
    outb(0x72, 0xcd6); b = inb(0xcd7);
    b |= 0x1;
    outb(0x72, 0xcd6); outb(b, 0xcd7);
    outb(0x72, 0xcd6); b = inb(0xcd7);
    if (!(b & 0x1))
        return;
    pci_read_config_dword(dev, 0x64, &d);
    d |= (1<<10);
    pci_write_config_dword(dev, 0x64, d);
    pci_read_config_dword(dev, 0x64, &d);
    if (!(d & (1<<10)))
        return;

    force_hpet_address = val;
    force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
    dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
           force_hpet_address);
    cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
             ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
    pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
    printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
    u32 val;

    if (hpet_address || force_hpet_address)
        return;

    if (!hpet_force_user) {
        hpet_print_force_info();
        return;
    }

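    /* bit 0 is the HPET enable bit; the upper bits hold the base address (0xfed00000) */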
    pci_write_config_dword(dev, 0x44, 0xfed00001);
    pci_read_config_dword(dev, 0x44, &val);
    force_hpet_address = val & 0xfffffffe;
    force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
    dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
        force_hpet_address);
    cached_dev = dev;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
            nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
            nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
            nvidia_force_enable_hpet);

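/* Re-run the chipset-specific force-enable recorded above after a resume from suspend. */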
void force_hpet_resume(void)
{
    switch (force_hpet_resume_type) {
    case ICH_FORCE_HPET_RESUME:
        ich_force_hpet_resume();
        return;
    case OLD_ICH_FORCE_HPET_RESUME:
        old_ich_force_hpet_resume();
        return;
    case VT8237_FORCE_HPET_RESUME:
        vt8237_force_hpet_resume();
        return;
    case NVIDIA_FORCE_HPET_RESUME:
        nvidia_force_hpet_resume();
        return;
    case ATI_FORCE_HPET_RESUME:
        ati_force_hpet_resume();
        return;
    default:
        break;
    }
}

/*
 * According to the datasheet, e6xx systems have the HPET hardwired to
 * 0xfed00000.
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
    if (hpet_address || force_hpet_address)
        return;

    force_hpet_address = 0xFED00000;
    force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
    dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
        "0x%lx\n", force_hpet_address);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
             e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
    hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
             force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
    struct pci_dev *nb_ht;
    unsigned int devfn;
    u32 node;
    u32 val;

    devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
    nb_ht = pci_get_slot(dev->bus, devfn);
    if (!nb_ht)
        return;

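    /* function 0 (HT config) holds the node ID in the low bits of config reg 0x60 */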
    pci_read_config_dword(nb_ht, 0x60, &val);
    node = pcibus_to_node(dev->bus) | (val & 7);
    /*
     * Some hardware may return an invalid node ID,
     * so check it first:
     */
    if (node_online(node))
        set_dev_node(&dev->dev, node);
    pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
            quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
            quirk_amd_nb_node);

#endif

#ifdef CONFIG_PCI
/*
 * The processor does not ensure that the DRAM scrub read/write sequence
 * is atomic with respect to accesses to the CC6 save state area. Therefore,
 * if a concurrent scrub read/write access hits the same address, the entry
 * may appear as if it was not written. This quirk applies to Fam16h models
 * 00h-0Fh.
 *
 * See "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
    u32 val;

    /*
     * Suggested workaround:
     * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
     */
    pci_read_config_dword(dev, 0x58, &val);
    if (val & 0x1F) {
        val &= ~(0x1F);
        pci_write_config_dword(dev, 0x58, val);
    }

    pci_read_config_dword(dev, 0x5C, &val);
    if (val & BIT(0)) {
        val &= ~BIT(0);
        pci_write_config_dword(dev, 0x5c, val);
    }
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
            amd_disable_seq_and_redirect_scrub);

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
    u32 capid0;

    pci_read_config_dword(pdev, 0x84, &capid0);

    if (capid0 & 0x10)
        enable_copy_mc_fragile();
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
    u32 capid0, capid5;

    pci_read_config_dword(pdev, 0x84, &capid0);
    pci_read_config_dword(pdev, 0x98, &capid5);

    /*
     * CAPID0{7:6} indicate whether this is an advanced RAS SKU
     * CAPID5{8:5} indicate that various NVDIMM usage modes are
     * enabled, so memory machine check recovery is also enabled.
     */
    if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
        enable_copy_mc_fragile();

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif

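/* True when DMI reports the system vendor as Apple. */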
bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

void __init early_platform_quirks(void)
{
    x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
                dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}