// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT  0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT 0x14a4
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

/* Cached per-node contents of the GART flush register (misc reg 0x9c). */
static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
    {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
    {}
};

static const struct pci_device_id amd_nb_link_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
    { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
    {}
};

static const struct pci_device_id hygon_root_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
    {}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
    {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
    { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
    {}
};

/* Each entry is { bus, dev_base, dev_limit }. */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
    { 0x00, 0x18, 0x20 },
    { 0xff, 0x00, 0x20 },
    { 0xfe, 0x00, 0x20 },
    { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
    return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
    return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
    return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

/* Walk all PCI devices and return the next one after @dev matching @ids. */
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                    const struct pci_device_id *ids)
{
    do {
        dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
        if (!dev)
            break;
    } while (!pci_match_id(ids, dev));
    return dev;
}

/*
 * SMN accesses are made indirectly through the node's root device:
 * the target SMN address is programmed into config register 0x60,
 * and the data is then read from or written to config register 0x64.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
    struct pci_dev *root;
    int err = -ENODEV;

    if (node >= amd_northbridges.num)
        goto out;

    root = node_to_amd_nb(node)->root;
    if (!root)
        goto out;

    mutex_lock(&smn_mutex);

    err = pci_write_config_dword(root, 0x60, address);
    if (err) {
        pr_warn("Error programming SMN address 0x%x.\n", address);
        goto out_unlock;
    }

    err = (write ? pci_write_config_dword(root, 0x64, *value)
             : pci_read_config_dword(root, 0x64, value));
    if (err)
        pr_warn("Error %s SMN address 0x%x.\n",
            (write ? "writing to" : "reading from"), address);

out_unlock:
    mutex_unlock(&smn_mutex);

out:
    return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
    return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
    return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
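
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * SMN register offset below is a placeholder): a caller would do
 * something like
 *
 *	u32 val;
 *	int err = amd_smn_read(0, EXAMPLE_SMN_REG, &val);
 *
 *	if (!err)
 *		err = amd_smn_write(0, EXAMPLE_SMN_REG, val | BIT(0));
 *
 * where the first argument is a node index below amd_nb_num().
 */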

static int amd_cache_northbridges(void)
{
    const struct pci_device_id *misc_ids = amd_nb_misc_ids;
    const struct pci_device_id *link_ids = amd_nb_link_ids;
    const struct pci_device_id *root_ids = amd_root_ids;
    struct pci_dev *root, *misc, *link;
    struct amd_northbridge *nb;
    u16 roots_per_misc = 0;
    u16 misc_count = 0;
    u16 root_count = 0;
    u16 i, j;

    if (amd_northbridges.num)
        return 0;

    if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        root_ids = hygon_root_ids;
        misc_ids = hygon_nb_misc_ids;
        link_ids = hygon_nb_link_ids;
    }

    misc = NULL;
    while ((misc = next_northbridge(misc, misc_ids)))
        misc_count++;

    if (!misc_count)
        return -ENODEV;

    root = NULL;
    while ((root = next_northbridge(root, root_ids)))
        root_count++;

    if (root_count) {
        roots_per_misc = root_count / misc_count;

        /*
         * There should be _exactly_ N roots for each DF/SMN
         * interface.
         */
        if (!roots_per_misc || (root_count % roots_per_misc)) {
            pr_info("Unsupported AMD DF/PCI configuration found\n");
            return -ENODEV;
        }
    }

    nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
    if (!nb)
        return -ENOMEM;

    amd_northbridges.nb = nb;
    amd_northbridges.num = misc_count;

    link = misc = root = NULL;
    for (i = 0; i < amd_northbridges.num; i++) {
        node_to_amd_nb(i)->root = root =
            next_northbridge(root, root_ids);
        node_to_amd_nb(i)->misc = misc =
            next_northbridge(misc, misc_ids);
        node_to_amd_nb(i)->link = link =
            next_northbridge(link, link_ids);

        /*
         * If there are more PCI root devices than data fabric/
         * system management network interfaces, then the (N)
         * PCI roots per DF/SMN interface are functionally the
         * same (for DF/SMN access) and N-1 are redundant.  N-1
         * PCI roots should be skipped per DF/SMN interface so
         * the following DF/SMN interfaces get mapped to
         * correct PCI roots.
         */
        for (j = 1; j < roots_per_misc; j++)
            root = next_northbridge(root, root_ids);
    }

    if (amd_gart_present())
        amd_northbridges.flags |= AMD_NB_GART;

    /*
     * Check for L3 cache presence.
     */
    if (!cpuid_edx(0x80000006))
        return 0;

    /*
     * Some CPU families support L3 Cache Index Disable. There are some
     * limitations because of E382 and E388 on family 0x10.
     */
    if (boot_cpu_data.x86 == 0x10 &&
        boot_cpu_data.x86_model >= 0x8 &&
        (boot_cpu_data.x86_model > 0x9 ||
         boot_cpu_data.x86_stepping >= 0x1))
        amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

    if (boot_cpu_data.x86 == 0x15)
        amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

    /* L3 cache partitioning is supported on family 0x15 */
    if (boot_cpu_data.x86 == 0x15)
        amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

    return 0;
}
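
/*
 * Worked example for the root-skipping logic above (hypothetical
 * topology): with misc_count == 2 DF/SMN interfaces and root_count == 4
 * PCI roots, roots_per_misc is 2, so node 0 is bound to root 0 (root 1
 * is skipped as redundant) and node 1 is bound to root 2 (root 3 is
 * skipped).
 */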

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
    const struct pci_device_id *misc_ids = amd_nb_misc_ids;
    const struct pci_device_id *id;
    u32 vendor = device & 0xffff;

    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
        boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
        return false;

    if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
        misc_ids = hygon_nb_misc_ids;

    device >>= 16;
    for (id = misc_ids; id->vendor; id++)
        if (vendor == id->vendor && device == id->device)
            return true;
    return false;
}
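
/*
 * Note (added for clarity): the @device argument above is the packed
 * vendor/device dword as read from PCI config space offset 0x0 --
 * vendor ID in bits 15:0, device ID in bits 31:16. For example, a
 * dword of (PCI_DEVICE_ID_AMD_17H_DF_F3 << 16) | PCI_VENDOR_ID_AMD
 * would match the family 0x17 entry in amd_nb_misc_ids.
 */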

struct resource *amd_get_mmconfig_range(struct resource *res)
{
    u32 address;
    u64 base, msr;
    unsigned int segn_busn_bits;

    if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
        boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
        return NULL;

    /* Assume all CPUs from family 0x10 onward have MMCONFIG. */
    if (boot_cpu_data.x86 < 0x10)
        return NULL;

    address = MSR_FAM10H_MMIO_CONF_BASE;
    rdmsrl(address, msr);

    /* mmconfig is not enabled */
    if (!(msr & FAM10H_MMIO_CONF_ENABLE))
        return NULL;

    base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

    segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
             FAM10H_MMIO_CONF_BUSRANGE_MASK;

    res->flags = IORESOURCE_MEM;
    res->start = base;
    res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
    return res;
}
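
/*
 * Sizing example for the window computed above: each PCI bus occupies
 * 1 MiB of MMCONFIG space (32 devices x 8 functions x 4 KiB), so with
 * segn_busn_bits == 8 the resource spans 1ULL << (8 + 20) = 256 MiB,
 * enough for buses 0-255.
 */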

int amd_get_subcaches(int cpu)
{
    struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
    unsigned int mask;

    if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
        return 0;

    pci_read_config_dword(link, 0x1d4, &mask);

    return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
    static unsigned int reset, ban;
    struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
    unsigned int reg;
    int cuid;

    if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
        return -EINVAL;

    /* if necessary, collect reset state of L3 partitioning and BAN mode */
    if (reset == 0) {
        pci_read_config_dword(nb->link, 0x1d4, &reset);
        pci_read_config_dword(nb->misc, 0x1b8, &ban);
        ban &= 0x180000;
    }

    /* deactivate BAN mode if any subcaches are to be disabled */
    if (mask != 0xf) {
        pci_read_config_dword(nb->misc, 0x1b8, &reg);
        pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
    }

    cuid = cpu_data(cpu).cpu_core_id;
    mask <<= 4 * cuid;
    mask |= (0xf ^ (1 << cuid)) << 26;

    pci_write_config_dword(nb->link, 0x1d4, mask);

    /* reset BAN mode if L3 partitioning returned to reset state */
    pci_read_config_dword(nb->link, 0x1d4, &reg);
    if (reg == reset) {
        pci_read_config_dword(nb->misc, 0x1b8, &reg);
        reg &= ~0x180000;
        pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
    }

    return 0;
}
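
/*
 * Arithmetic example for the mask computation above (values are
 * hypothetical): with cuid == 1 and mask == 0x3, the value written to
 * register 0x1d4 is (0x3 << 4) | ((0xf ^ (1 << 1)) << 26), i.e. the
 * 4-bit subcache mask shifted into compute unit 1's slot, plus
 * 0xd << 26 in the high control bits.
 */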

static void amd_cache_gart(void)
{
    u16 i;

    if (!amd_nb_has_feature(AMD_NB_GART))
        return;

    flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
    if (!flush_words) {
        amd_northbridges.flags &= ~AMD_NB_GART;
        pr_notice("Cannot initialize GART flush words, GART support disabled\n");
        return;
    }

    for (i = 0; i != amd_northbridges.num; i++)
        pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
    int flushed, i;
    unsigned long flags;
    static DEFINE_SPINLOCK(gart_lock);

    if (!amd_nb_has_feature(AMD_NB_GART))
        return;

    /*
     * Avoid races between AGP and IOMMU. In theory it's not needed
     * but I'm not sure if the hardware won't lose flush requests
     * when another is pending. This whole thing is so expensive anyway
     * that it doesn't matter to serialize more. -AK
     */
    spin_lock_irqsave(&gart_lock, flags);
    flushed = 0;
    for (i = 0; i < amd_northbridges.num; i++) {
        pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                       flush_words[i] | 1);
        flushed++;
    }
    for (i = 0; i < amd_northbridges.num; i++) {
        u32 w;
        /* Make sure the hardware actually executed the flush. */
        for (;;) {
            pci_read_config_dword(node_to_amd_nb(i)->misc,
                          0x9c, &w);
            if (!(w & 1))
                break;
            cpu_relax();
        }
    }
    spin_unlock_irqrestore(&gart_lock, flags);
    if (!flushed)
        pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
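
/*
 * Note (added for clarity): the flush handshake above sets bit 0 of
 * the register at offset 0x9c in each node's misc device and then
 * polls until the hardware clears that bit again, which signals that
 * the GART TLB flush has completed.
 */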

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

    msr_set_bit(MSR_AMD64_IC_CFG, 3);
    msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
    struct pci_dev *F4;
    u32 val;

    if (boot_cpu_data.x86 != 0x14)
        return;

    if (!amd_northbridges.num)
        return;

    F4 = node_to_amd_nb(0)->link;
    if (!F4)
        return;

    if (pci_read_config_dword(F4, 0x164, &val))
        return;

    if (val & BIT(2))
        return;

    on_each_cpu(__fix_erratum_688, NULL, 0);

    pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
    amd_cache_northbridges();
    amd_cache_gart();

    fix_erratum_688();

    return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);