// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Information [31:0] (contains the number of nodes per processor).
 */
static u32 nodes_per_socket = 1;

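/*
 * Certain K8 MSRs are only accessible when EDI holds the vendor
 * "passcode" 0x9c5a203a; the two wrappers below load it into the
 * register image before the RDMSR/WRMSR so the access does not fault.
 */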
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266)
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	hardware at the same time for speed.
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and reads back the alias region,
 * which can hang the machine. Disable the CBAR alias if the BIOS
 * left it enabled.
 */
#define CBAR		(0xfffc)
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
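		/* Keep the call truly indirect so the timed loop is not optimized away. */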
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map);
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

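		/* ECX[7:0] holds the node/die ID, EBX[7:0] the core or compute-unit ID. */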
		c->cpu_die_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On AMD dual core setups the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = get_llc_id(cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];

		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

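		/*
		 * CPUID Fn8000_0005 EDX: bits 31:24 are the L1D size in KB,
		 * bits 23:16 its associativity. The per-way size gives the
		 * VA slice that mmap randomization must keep consistent to
		 * avoid L1D aliasing penalties on Fam15h.
		 */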
		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

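	/*
	 * Determine the number of nodes per socket: prefer CPUID
	 * Fn8000_001E ECX[10:8] (NodesPerProcessor - 1); fall back to
	 * the Fam10h NodeId MSR.
	 */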
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *            SEV and SEV_ES feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by hardware)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h by setting bit 10 of
	 * MC4_MASK. The BIOS is supposed to mask these errors because
	 * they are intermittent and benign, but some BIOSes do not,
	 * and an unmasked GART TLB walk error is then reported as an
	 * uncorrected machine check and can crash the box. Mask it
	 * here unconditionally.
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support,
	 * causing it to be converted to CD memtype. This causes performance
	 * degradation for certain nested-paging guests. Prevent this
	 * conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 *
	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);

		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s reports MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

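	/* Family-specific setup; Fam17h falls through into the shared Zen path. */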
	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_spectral_chicken(c);
		   fallthrough;
	case 0x19: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !cpu_has_amd_erratum(c, amd_erratum_1054))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

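	/* Fn8000_0006: EAX describes the L2 TLB for 2M/4M pages, EBX for 4K pages. */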
	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

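	/*
	 * When the erratum carries an OSVW id, prefer the OS Visible
	 * Workaround MSRs: they state authoritatively whether this
	 * particular part is affected.
	 */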
	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

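/*
 * Program the optional breakpoint address-mask MSRs, present when
 * X86_FEATURE_BPEXT is set: address bits set in the mask are ignored
 * when hardware compares an access against DR0-DR3.
 */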
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}

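/*
 * Some Fam17h/Fam19h parts advertise a CPPC highest perf of 255 while
 * the actual preferred-core maximum is 166; report that instead.
 */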
u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);