0011 #include <linux/init.h>
0012 #include <linux/utsname.h>
0013 #include <linux/cpu.h>
0014 #include <linux/module.h>
0015 #include <linux/nospec.h>
0016 #include <linux/prctl.h>
0017 #include <linux/sched/smt.h>
0018 #include <linux/pgtable.h>
0019 #include <linux/bpf.h>
0020
0021 #include <asm/spec-ctrl.h>
0022 #include <asm/cmdline.h>
0023 #include <asm/bugs.h>
0024 #include <asm/processor.h>
0025 #include <asm/processor-flags.h>
0026 #include <asm/fpu/api.h>
0027 #include <asm/msr.h>
0028 #include <asm/vmx.h>
0029 #include <asm/paravirt.h>
0030 #include <asm/alternative.h>
0031 #include <asm/set_memory.h>
0032 #include <asm/intel-family.h>
0033 #include <asm/e820/api.h>
0034 #include <asm/hypervisor.h>
0035 #include <asm/tlbflush.h>
0036
0037 #include "cpu.h"
0038
0039 static void __init spectre_v1_select_mitigation(void);
0040 static void __init spectre_v2_select_mitigation(void);
0041 static void __init retbleed_select_mitigation(void);
0042 static void __init spectre_v2_user_select_mitigation(void);
0043 static void __init ssb_select_mitigation(void);
0044 static void __init l1tf_select_mitigation(void);
0045 static void __init mds_select_mitigation(void);
0046 static void __init md_clear_update_mitigation(void);
0047 static void __init md_clear_select_mitigation(void);
0048 static void __init taa_select_mitigation(void);
0049 static void __init mmio_select_mitigation(void);
0050 static void __init srbds_select_mitigation(void);
0051 static void __init l1d_flush_select_mitigation(void);
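/* The base value of the SPEC_CTRL MSR without task-specific bits set. */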
0054 u64 x86_spec_ctrl_base;
0055 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
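/*
 * The current value of the SPEC_CTRL MSR with task-specific bits set.
 * Kept up to date by write_spec_ctrl_current() below.
 */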
0058 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
0059 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
0060
0061 static DEFINE_MUTEX(spec_ctrl_mutex);
0062
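/*
 * Update the per-CPU shadow copy of the SPEC_CTRL MSR and, if required,
 * the MSR itself. The MSR write is skipped when the value is unchanged.
 */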
0067 void write_spec_ctrl_current(u64 val, bool force)
0068 {
0069 if (this_cpu_read(x86_spec_ctrl_current) == val)
0070 return;
0071
0072 this_cpu_write(x86_spec_ctrl_current, val);
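/*
 * With X86_FEATURE_KERNEL_IBRS the MSR is rewritten on return to user
 * space, so unless @force is set the hardware update can be deferred
 * until then.
 */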
0078 if (force || !cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
0079 wrmsrl(MSR_IA32_SPEC_CTRL, val);
0080 }
0081
0082 u64 spec_ctrl_current(void)
0083 {
0084 return this_cpu_read(x86_spec_ctrl_current);
0085 }
0086 EXPORT_SYMBOL_GPL(spec_ctrl_current);
0087
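/*
 * AMD specific MSR state for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is the SSBD bit for MSR_AMD64_LS_CFG.
 */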
0092 u64 __ro_after_init x86_amd_ls_cfg_base;
0093 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
0094
0095
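/* Control conditional STIBP in switch_to() */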
0096 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
0097
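/* Control conditional IBPB in switch_mm() */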
0098 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
0099
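/* Control unconditional IBPB in switch_mm() */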
0100 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
0101
0102
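/* Control MDS CPU buffer clear before returning to user space */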
0103 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
0104 EXPORT_SYMBOL_GPL(mds_user_clear);
0105
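/* Control MDS CPU buffer clear before idling (halt, mwait) */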
0106 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
0107 EXPORT_SYMBOL_GPL(mds_idle_clear);
0108
0109
0110
0111
0112
0113
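/* Control conditional L1D cache flush in switch_mm(), enabled via l1d_flush=on */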
0114 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
0115
0116
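/* Control CPU buffer clear before entering a guest, for MMIO Stale Data (consumed by KVM) */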
0117 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
0118 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
0119
0120 void __init check_bugs(void)
0121 {
0122 identify_boot_cpu();
0123
0124
0125
0126
0127
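/*
 * identify_boot_cpu() initialized SMT support information, let the
 * core code know.
 */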
0128 cpu_smt_check_topology();
0129
0130 if (!IS_ENABLED(CONFIG_SMP)) {
0131 pr_info("CPU: ");
0132 print_cpu_info(&boot_cpu_data);
0133 }
0134
0135
0136
0137
0138
0139
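/*
 * Read the SPEC_CTRL MSR to account for reserved bits which may have
 * unknown values.
 */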
0140 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
0141 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
0142
0143
0144 spectre_v1_select_mitigation();
0145 spectre_v2_select_mitigation();
0146
0147
0148
0149
0150
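/*
 * retbleed_select_mitigation() depends on the state set by
 * spectre_v2_select_mitigation() above (in particular spectre_v2=ibrs).
 */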
0151 retbleed_select_mitigation();
0152
0153
0154
0155
0156
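/*
 * spectre_v2_user_select_mitigation() in turn depends on the retbleed
 * selection above, which can force STIBP to always-on.
 */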
0157 spectre_v2_user_select_mitigation();
0158 ssb_select_mitigation();
0159 l1tf_select_mitigation();
0160 md_clear_select_mitigation();
0161 srbds_select_mitigation();
0162 l1d_flush_select_mitigation();
0163
0164 arch_smt_update();
0165
0166 #ifdef CONFIG_X86_32
0167
0168
0169
0170
0171
0172
0173
0174 if (boot_cpu_data.x86 < 4)
0175 panic("Kernel requires i486+ for 'invlpg' and other features");
0176
0177 init_utsname()->machine[1] =
0178 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
0179 alternative_instructions();
0180
0181 fpu__init_check_bugs();
0182 #else
0183 alternative_instructions();
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193 if (!direct_gbpages)
0194 set_memory_4k((unsigned long)__va(0), 1);
0195 #endif
0196 }
0197
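/*
 * x86_virt_spec_ctrl() - update speculation control MSRs on guest entry/exit.
 * MSR_IA32_SPEC_CTRL is only written when the host and guest values differ.
 * On CPUs that handle SSBD via LS_CFG or VIRT_SPEC_CTRL instead, the SSBD
 * difference is propagated through the TIF_SSBD machinery via
 * speculation_ctrl_update().
 */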
0202 void
0203 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
0204 {
0205 u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
0206 struct thread_info *ti = current_thread_info();
0207
0208 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
0209 if (hostval != guestval) {
0210 msrval = setguest ? guestval : hostval;
0211 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
0212 }
0213 }
0214
0215
0216
0217
0218
0219 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
0220 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
0221 return;
0222
0223
0224
0225
0226
0227
0228 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
0229 hostval = SPEC_CTRL_SSBD;
0230 else
0231 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
0232
0233
0234 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
0235
0236 if (hostval != guestval) {
0237 unsigned long tif;
0238
0239 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
0240 ssbd_spec_ctrl_to_tif(hostval);
0241
0242 speculation_ctrl_update(tif);
0243 }
0244 }
0245 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
0246
0247 static void x86_amd_ssb_disable(void)
0248 {
0249 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
0250
0251 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
0252 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
0253 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
0254 wrmsrl(MSR_AMD64_LS_CFG, msrval);
0255 }
0256
0257 #undef pr_fmt
0258 #define pr_fmt(fmt) "MDS: " fmt
0259
0260
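/* Default mitigation for MDS-affected CPUs */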
0261 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
0262 static bool mds_nosmt __ro_after_init = false;
0263
0264 static const char * const mds_strings[] = {
0265 [MDS_MITIGATION_OFF] = "Vulnerable",
0266 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
0267 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
0268 };
0269
0270 static void __init mds_select_mitigation(void)
0271 {
0272 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
0273 mds_mitigation = MDS_MITIGATION_OFF;
0274 return;
0275 }
0276
0277 if (mds_mitigation == MDS_MITIGATION_FULL) {
0278 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
0279 mds_mitigation = MDS_MITIGATION_VMWERV;
0280
0281 static_branch_enable(&mds_user_clear);
0282
0283 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
0284 (mds_nosmt || cpu_mitigations_auto_nosmt()))
0285 cpu_smt_disable(false);
0286 }
0287 }
0288
0289 static int __init mds_cmdline(char *str)
0290 {
0291 if (!boot_cpu_has_bug(X86_BUG_MDS))
0292 return 0;
0293
0294 if (!str)
0295 return -EINVAL;
0296
0297 if (!strcmp(str, "off"))
0298 mds_mitigation = MDS_MITIGATION_OFF;
0299 else if (!strcmp(str, "full"))
0300 mds_mitigation = MDS_MITIGATION_FULL;
0301 else if (!strcmp(str, "full,nosmt")) {
0302 mds_mitigation = MDS_MITIGATION_FULL;
0303 mds_nosmt = true;
0304 }
0305
0306 return 0;
0307 }
0308 early_param("mds", mds_cmdline);
0309
0310 #undef pr_fmt
0311 #define pr_fmt(fmt) "TAA: " fmt
0312
0313 enum taa_mitigations {
0314 TAA_MITIGATION_OFF,
0315 TAA_MITIGATION_UCODE_NEEDED,
0316 TAA_MITIGATION_VERW,
0317 TAA_MITIGATION_TSX_DISABLED,
0318 };
0319
0320
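/* Default mitigation for TAA-affected CPUs */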
0321 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
0322 static bool taa_nosmt __ro_after_init;
0323
0324 static const char * const taa_strings[] = {
0325 [TAA_MITIGATION_OFF] = "Vulnerable",
0326 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
0327 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
0328 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
0329 };
0330
0331 static void __init taa_select_mitigation(void)
0332 {
0333 u64 ia32_cap;
0334
0335 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
0336 taa_mitigation = TAA_MITIGATION_OFF;
0337 return;
0338 }
0339
0340
0341 if (!boot_cpu_has(X86_FEATURE_RTM)) {
0342 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
0343 return;
0344 }
0345
0346 if (cpu_mitigations_off()) {
0347 taa_mitigation = TAA_MITIGATION_OFF;
0348 return;
0349 }
0350
0351
0352
0353
0354
0355 if (taa_mitigation == TAA_MITIGATION_OFF &&
0356 mds_mitigation == MDS_MITIGATION_OFF)
0357 return;
0358
0359 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
0360 taa_mitigation = TAA_MITIGATION_VERW;
0361 else
0362 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
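/*
 * VERW based clearing only works with updated microcode. On MDS_NO CPUs
 * the absence of the TSX_CTRL MSR indicates that such microcode is not
 * present, so report the mitigation as incomplete.
 */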
0373 ia32_cap = x86_read_arch_cap_msr();
0374 if ((ia32_cap & ARCH_CAP_MDS_NO) &&
0375 !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
0376 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
0377
0378
0379
0380
0381
0382
0383
0384
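/*
 * TAA is mitigated by the same VERW buffer clearing as MDS, so enable
 * the shared mds_user_clear static branch.
 */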
0385 static_branch_enable(&mds_user_clear);
0386
0387 if (taa_nosmt || cpu_mitigations_auto_nosmt())
0388 cpu_smt_disable(false);
0389 }
0390
0391 static int __init tsx_async_abort_parse_cmdline(char *str)
0392 {
0393 if (!boot_cpu_has_bug(X86_BUG_TAA))
0394 return 0;
0395
0396 if (!str)
0397 return -EINVAL;
0398
0399 if (!strcmp(str, "off")) {
0400 taa_mitigation = TAA_MITIGATION_OFF;
0401 } else if (!strcmp(str, "full")) {
0402 taa_mitigation = TAA_MITIGATION_VERW;
0403 } else if (!strcmp(str, "full,nosmt")) {
0404 taa_mitigation = TAA_MITIGATION_VERW;
0405 taa_nosmt = true;
0406 }
0407
0408 return 0;
0409 }
0410 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
0411
0412 #undef pr_fmt
0413 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
0414
0415 enum mmio_mitigations {
0416 MMIO_MITIGATION_OFF,
0417 MMIO_MITIGATION_UCODE_NEEDED,
0418 MMIO_MITIGATION_VERW,
0419 };
0420
0421
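/* Default mitigation for Processor MMIO Stale Data vulnerabilities */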
0422 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
0423 static bool mmio_nosmt __ro_after_init = false;
0424
0425 static const char * const mmio_strings[] = {
0426 [MMIO_MITIGATION_OFF] = "Vulnerable",
0427 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
0428 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
0429 };
0430
0431 static void __init mmio_select_mitigation(void)
0432 {
0433 u64 ia32_cap;
0434
0435 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
0436 boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
0437 cpu_mitigations_off()) {
0438 mmio_mitigation = MMIO_MITIGATION_OFF;
0439 return;
0440 }
0441
0442 if (mmio_mitigation == MMIO_MITIGATION_OFF)
0443 return;
0444
0445 ia32_cap = x86_read_arch_cap_msr();
0446
0447
0448
0449
0450
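/*
 * If the CPU is also affected by MDS or by TAA with TSX enabled, use the
 * mds_user_clear mitigation which covers both kernel exit and guest
 * entry; otherwise clear CPU buffers only before entering a guest.
 */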
0451 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
0452 boot_cpu_has(X86_FEATURE_RTM)))
0453 static_branch_enable(&mds_user_clear);
0454 else
0455 static_branch_enable(&mmio_stale_data_clear);
0456
0457
0458
0459
0460
0461
0462 if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
0463 static_branch_enable(&mds_idle_clear);
0464
0465
0466
0467
0468
0469
0470
0471
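/*
 * VERW only clears the CPU fill buffers with suitable microcode: either
 * FB_CLEAR is enumerated, or the CPU is MDS affected and has the
 * MD_CLEAR and FLUSH_L1D capabilities.
 */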
0472 if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
0473 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
0474 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
0475 !(ia32_cap & ARCH_CAP_MDS_NO)))
0476 mmio_mitigation = MMIO_MITIGATION_VERW;
0477 else
0478 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
0479
0480 if (mmio_nosmt || cpu_mitigations_auto_nosmt())
0481 cpu_smt_disable(false);
0482 }
0483
0484 static int __init mmio_stale_data_parse_cmdline(char *str)
0485 {
0486 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
0487 return 0;
0488
0489 if (!str)
0490 return -EINVAL;
0491
0492 if (!strcmp(str, "off")) {
0493 mmio_mitigation = MMIO_MITIGATION_OFF;
0494 } else if (!strcmp(str, "full")) {
0495 mmio_mitigation = MMIO_MITIGATION_VERW;
0496 } else if (!strcmp(str, "full,nosmt")) {
0497 mmio_mitigation = MMIO_MITIGATION_VERW;
0498 mmio_nosmt = true;
0499 }
0500
0501 return 0;
0502 }
0503 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
0504
0505 #undef pr_fmt
0506 #define pr_fmt(fmt) "" fmt
0507
0508 static void __init md_clear_update_mitigation(void)
0509 {
0510 if (cpu_mitigations_off())
0511 return;
0512
0513 if (!static_key_enabled(&mds_user_clear))
0514 goto out;
0515
0516
0517
0518
0519
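/*
 * mds_user_clear is enabled, so re-run the selection for MDS, TAA and
 * MMIO Stale Data: they share the VERW mitigation and their reported
 * state should reflect that.
 */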
0520 if (mds_mitigation == MDS_MITIGATION_OFF &&
0521 boot_cpu_has_bug(X86_BUG_MDS)) {
0522 mds_mitigation = MDS_MITIGATION_FULL;
0523 mds_select_mitigation();
0524 }
0525 if (taa_mitigation == TAA_MITIGATION_OFF &&
0526 boot_cpu_has_bug(X86_BUG_TAA)) {
0527 taa_mitigation = TAA_MITIGATION_VERW;
0528 taa_select_mitigation();
0529 }
0530 if (mmio_mitigation == MMIO_MITIGATION_OFF &&
0531 boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
0532 mmio_mitigation = MMIO_MITIGATION_VERW;
0533 mmio_select_mitigation();
0534 }
0535 out:
0536 if (boot_cpu_has_bug(X86_BUG_MDS))
0537 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
0538 if (boot_cpu_has_bug(X86_BUG_TAA))
0539 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
0540 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
0541 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
0542 else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
0543 pr_info("MMIO Stale Data: Unknown: No mitigations\n");
0544 }
0545
0546 static void __init md_clear_select_mitigation(void)
0547 {
0548 mds_select_mitigation();
0549 taa_select_mitigation();
0550 mmio_select_mitigation();
0551
0552
0553
0554
0555
0556
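/*
 * MDS, TAA and MMIO Stale Data share the VERW based mitigation, so the
 * combined status is updated and printed after all selections are done.
 */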
0557 md_clear_update_mitigation();
0558 }
0559
0560 #undef pr_fmt
0561 #define pr_fmt(fmt) "SRBDS: " fmt
0562
0563 enum srbds_mitigations {
0564 SRBDS_MITIGATION_OFF,
0565 SRBDS_MITIGATION_UCODE_NEEDED,
0566 SRBDS_MITIGATION_FULL,
0567 SRBDS_MITIGATION_TSX_OFF,
0568 SRBDS_MITIGATION_HYPERVISOR,
0569 };
0570
0571 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
0572
0573 static const char * const srbds_strings[] = {
0574 [SRBDS_MITIGATION_OFF] = "Vulnerable",
0575 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
0576 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
0577 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
0578 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
0579 };
0580
0581 static bool srbds_off;
0582
0583 void update_srbds_msr(void)
0584 {
0585 u64 mcu_ctrl;
0586
0587 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
0588 return;
0589
0590 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
0591 return;
0592
0593 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
0594 return;
0595
0596
0597
0598
0599
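/*
 * A CPU which is not vulnerable in its current configuration (e.g. an
 * MDS_NO part with TSX disabled) may lack the SRBDS_CTRL microcode;
 * there is nothing to program in that case.
 */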
0600 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
0601 return;
0602
0603 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
0604
0605 switch (srbds_mitigation) {
0606 case SRBDS_MITIGATION_OFF:
0607 case SRBDS_MITIGATION_TSX_OFF:
0608 mcu_ctrl |= RNGDS_MITG_DIS;
0609 break;
0610 case SRBDS_MITIGATION_FULL:
0611 mcu_ctrl &= ~RNGDS_MITG_DIS;
0612 break;
0613 default:
0614 break;
0615 }
0616
0617 wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
0618 }
0619
0620 static void __init srbds_select_mitigation(void)
0621 {
0622 u64 ia32_cap;
0623
0624 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
0625 return;
0626
0627
0628
0629
0630
0631
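/*
 * Check whether this is one of the MDS_NO systems on which SRBDS is only
 * exposed via TSX or MMIO Stale Data; with TSX disabled and no MMIO
 * Stale Data, such a CPU does not need the microcode mitigation.
 */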
0632 ia32_cap = x86_read_arch_cap_msr();
0633 if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
0634 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
0635 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
0636 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
0637 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
0638 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
0639 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
0640 else if (cpu_mitigations_off() || srbds_off)
0641 srbds_mitigation = SRBDS_MITIGATION_OFF;
0642
0643 update_srbds_msr();
0644 pr_info("%s\n", srbds_strings[srbds_mitigation]);
0645 }
0646
0647 static int __init srbds_parse_cmdline(char *str)
0648 {
0649 if (!str)
0650 return -EINVAL;
0651
0652 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
0653 return 0;
0654
0655 srbds_off = !strcmp(str, "off");
0656 return 0;
0657 }
0658 early_param("srbds", srbds_parse_cmdline);
0659
0660 #undef pr_fmt
0661 #define pr_fmt(fmt) "L1D Flush : " fmt
0662
0663 enum l1d_flush_mitigations {
0664 L1D_FLUSH_OFF = 0,
0665 L1D_FLUSH_ON,
0666 };
0667
0668 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
0669
0670 static void __init l1d_flush_select_mitigation(void)
0671 {
0672 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
0673 return;
0674
0675 static_branch_enable(&switch_mm_cond_l1d_flush);
0676 pr_info("Conditional flush on switch_mm() enabled\n");
0677 }
0678
0679 static int __init l1d_flush_parse_cmdline(char *str)
0680 {
0681 if (!strcmp(str, "on"))
0682 l1d_flush_mitigation = L1D_FLUSH_ON;
0683
0684 return 0;
0685 }
0686 early_param("l1d_flush", l1d_flush_parse_cmdline);
0687
0688 #undef pr_fmt
0689 #define pr_fmt(fmt) "Spectre V1 : " fmt
0690
0691 enum spectre_v1_mitigation {
0692 SPECTRE_V1_MITIGATION_NONE,
0693 SPECTRE_V1_MITIGATION_AUTO,
0694 };
0695
0696 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
0697 SPECTRE_V1_MITIGATION_AUTO;
0698
0699 static const char * const spectre_v1_strings[] = {
0700 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
0701 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
0702 };
0703
0704
0705
0706
0707
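/*
 * Does SMAP provide full mitigation against speculative kernel access of
 * user data?
 */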
0708 static bool smap_works_speculatively(void)
0709 {
0710 if (!boot_cpu_has(X86_FEATURE_SMAP))
0711 return false;
0712
0713
0714
0715
0716
0717
0718
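/*
 * On CPUs vulnerable to Meltdown, SMAP does not prevent speculative
 * access to user data in the L1 cache, so treat it as non-functional
 * here.
 */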
0719 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
0720 return false;
0721
0722 return true;
0723 }
0724
0725 static void __init spectre_v1_select_mitigation(void)
0726 {
0727 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
0728 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
0729 return;
0730 }
0731
0732 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
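/*
 * A user-controlled GS value can steer either path of a conditional
 * SWAPGS, and with FSGSBASE the user can even place a kernel address in
 * GS, where SMAP offers no protection; LFENCE barriers are needed then.
 */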
0745 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
0746 !smap_works_speculatively()) {
0747
0748
0749
0750
0751
0752
0753
0754
0755 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
0756 !boot_cpu_has(X86_FEATURE_PTI))
0757 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
0758
0759
0760
0761
0762
0763
0764 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
0765 }
0766 }
0767
0768 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
0769 }
0770
0771 static int __init nospectre_v1_cmdline(char *str)
0772 {
0773 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
0774 return 0;
0775 }
0776 early_param("nospectre_v1", nospectre_v1_cmdline);
0777
0778 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
0779 SPECTRE_V2_NONE;
0780
0781 #undef pr_fmt
0782 #define pr_fmt(fmt) "RETBleed: " fmt
0783
0784 enum retbleed_mitigation {
0785 RETBLEED_MITIGATION_NONE,
0786 RETBLEED_MITIGATION_UNRET,
0787 RETBLEED_MITIGATION_IBPB,
0788 RETBLEED_MITIGATION_IBRS,
0789 RETBLEED_MITIGATION_EIBRS,
0790 };
0791
0792 enum retbleed_mitigation_cmd {
0793 RETBLEED_CMD_OFF,
0794 RETBLEED_CMD_AUTO,
0795 RETBLEED_CMD_UNRET,
0796 RETBLEED_CMD_IBPB,
0797 };
0798
0799 static const char * const retbleed_strings[] = {
0800 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
0801 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
0802 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
0803 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
0804 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
0805 };
0806
0807 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
0808 RETBLEED_MITIGATION_NONE;
0809 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
0810 RETBLEED_CMD_AUTO;
0811
0812 static int __ro_after_init retbleed_nosmt = false;
0813
0814 static int __init retbleed_parse_cmdline(char *str)
0815 {
0816 if (!str)
0817 return -EINVAL;
0818
0819 while (str) {
0820 char *next = strchr(str, ',');
0821 if (next) {
0822 *next = 0;
0823 next++;
0824 }
0825
0826 if (!strcmp(str, "off")) {
0827 retbleed_cmd = RETBLEED_CMD_OFF;
0828 } else if (!strcmp(str, "auto")) {
0829 retbleed_cmd = RETBLEED_CMD_AUTO;
0830 } else if (!strcmp(str, "unret")) {
0831 retbleed_cmd = RETBLEED_CMD_UNRET;
0832 } else if (!strcmp(str, "ibpb")) {
0833 retbleed_cmd = RETBLEED_CMD_IBPB;
0834 } else if (!strcmp(str, "nosmt")) {
0835 retbleed_nosmt = true;
0836 } else {
0837 pr_err("Ignoring unknown retbleed option (%s).", str);
0838 }
0839
0840 str = next;
0841 }
0842
0843 return 0;
0844 }
0845 early_param("retbleed", retbleed_parse_cmdline);
0846
0847 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
0848 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
0849
0850 static void __init retbleed_select_mitigation(void)
0851 {
0852 bool mitigate_smt = false;
0853
0854 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
0855 return;
0856
0857 switch (retbleed_cmd) {
0858 case RETBLEED_CMD_OFF:
0859 return;
0860
0861 case RETBLEED_CMD_UNRET:
0862 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
0863 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
0864 } else {
0865 pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
0866 goto do_cmd_auto;
0867 }
0868 break;
0869
0870 case RETBLEED_CMD_IBPB:
0871 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
0872 pr_err("WARNING: CPU does not support IBPB.\n");
0873 goto do_cmd_auto;
0874 } else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
0875 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
0876 } else {
0877 pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
0878 goto do_cmd_auto;
0879 }
0880 break;
0881
0882 do_cmd_auto:
0883 case RETBLEED_CMD_AUTO:
0884 default:
0885 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
0886 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
0887 if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
0888 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
0889 else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
0890 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
0891 }
0892
0893
0894
0895
0896
0897
0898
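/*
 * On Intel the mitigation (IBRS or eIBRS) is the one selected by
 * spectre_v2_select_mitigation(); retbleed_mitigation is set from it
 * further below.
 */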
0899 break;
0900 }
0901
0902 switch (retbleed_mitigation) {
0903 case RETBLEED_MITIGATION_UNRET:
0904 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
0905 setup_force_cpu_cap(X86_FEATURE_UNRET);
0906
0907 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
0908 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
0909 pr_err(RETBLEED_UNTRAIN_MSG);
0910
0911 mitigate_smt = true;
0912 break;
0913
0914 case RETBLEED_MITIGATION_IBPB:
0915 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
0916 mitigate_smt = true;
0917 break;
0918
0919 default:
0920 break;
0921 }
0922
0923 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
0924 (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
0925 cpu_smt_disable(false);
0926
0927
0928
0929
0930
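/*
 * On Intel, report IBRS / eIBRS chosen for Spectre v2 as the retbleed
 * mitigation, and warn if neither is in effect.
 */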
0931 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
0932 switch (spectre_v2_enabled) {
0933 case SPECTRE_V2_IBRS:
0934 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
0935 break;
0936 case SPECTRE_V2_EIBRS:
0937 case SPECTRE_V2_EIBRS_RETPOLINE:
0938 case SPECTRE_V2_EIBRS_LFENCE:
0939 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
0940 break;
0941 default:
0942 pr_err(RETBLEED_INTEL_MSG);
0943 }
0944 }
0945
0946 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
0947 }
0948
0949 #undef pr_fmt
0950 #define pr_fmt(fmt) "Spectre V2 : " fmt
0951
0952 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
0953 SPECTRE_V2_USER_NONE;
0954 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
0955 SPECTRE_V2_USER_NONE;
0956
0957 #ifdef CONFIG_RETPOLINE
0958 static bool spectre_v2_bad_module;
0959
0960 bool retpoline_module_ok(bool has_retpoline)
0961 {
0962 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
0963 return true;
0964
0965 pr_err("System may be vulnerable to spectre v2\n");
0966 spectre_v2_bad_module = true;
0967 return false;
0968 }
0969
0970 static inline const char *spectre_v2_module_string(void)
0971 {
0972 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
0973 }
0974 #else
0975 static inline const char *spectre_v2_module_string(void) { return ""; }
0976 #endif
0977
0978 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
0979 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
0980 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
0981 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
0982
0983 #ifdef CONFIG_BPF_SYSCALL
0984 void unpriv_ebpf_notify(int new_state)
0985 {
0986 if (new_state)
0987 return;
0988
0989
0990
0991 switch (spectre_v2_enabled) {
0992 case SPECTRE_V2_EIBRS:
0993 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
0994 break;
0995 case SPECTRE_V2_EIBRS_LFENCE:
0996 if (sched_smt_active())
0997 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
0998 break;
0999 default:
1000 break;
1001 }
1002 }
1003 #endif
1004
1005 static inline bool match_option(const char *arg, int arglen, const char *opt)
1006 {
1007 int len = strlen(opt);
1008
1009 return len == arglen && !strncmp(arg, opt, len);
1010 }
1011
1012
1013 enum spectre_v2_mitigation_cmd {
1014 SPECTRE_V2_CMD_NONE,
1015 SPECTRE_V2_CMD_AUTO,
1016 SPECTRE_V2_CMD_FORCE,
1017 SPECTRE_V2_CMD_RETPOLINE,
1018 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1019 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1020 SPECTRE_V2_CMD_EIBRS,
1021 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1022 SPECTRE_V2_CMD_EIBRS_LFENCE,
1023 SPECTRE_V2_CMD_IBRS,
1024 };
1025
1026 enum spectre_v2_user_cmd {
1027 SPECTRE_V2_USER_CMD_NONE,
1028 SPECTRE_V2_USER_CMD_AUTO,
1029 SPECTRE_V2_USER_CMD_FORCE,
1030 SPECTRE_V2_USER_CMD_PRCTL,
1031 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1032 SPECTRE_V2_USER_CMD_SECCOMP,
1033 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1034 };
1035
1036 static const char * const spectre_v2_user_strings[] = {
1037 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1038 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1039 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1040 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1041 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1042 };
1043
1044 static const struct {
1045 const char *option;
1046 enum spectre_v2_user_cmd cmd;
1047 bool secure;
1048 } v2_user_options[] __initconst = {
1049 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
1050 { "off", SPECTRE_V2_USER_CMD_NONE, false },
1051 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
1052 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
1053 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
1054 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
1055 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1056 };
1057
1058 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1059 {
1060 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1061 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1062 }
1063
1064 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
1065
1066 static enum spectre_v2_user_cmd __init
1067 spectre_v2_parse_user_cmdline(void)
1068 {
1069 char arg[20];
1070 int ret, i;
1071
1072 switch (spectre_v2_cmd) {
1073 case SPECTRE_V2_CMD_NONE:
1074 return SPECTRE_V2_USER_CMD_NONE;
1075 case SPECTRE_V2_CMD_FORCE:
1076 return SPECTRE_V2_USER_CMD_FORCE;
1077 default:
1078 break;
1079 }
1080
1081 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1082 arg, sizeof(arg));
1083 if (ret < 0)
1084 return SPECTRE_V2_USER_CMD_AUTO;
1085
1086 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1087 if (match_option(arg, ret, v2_user_options[i].option)) {
1088 spec_v2_user_print_cond(v2_user_options[i].option,
1089 v2_user_options[i].secure);
1090 return v2_user_options[i].cmd;
1091 }
1092 }
1093
1094 pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1095 return SPECTRE_V2_USER_CMD_AUTO;
1096 }
1097
1098 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1099 {
1100 return mode == SPECTRE_V2_IBRS ||
1101 mode == SPECTRE_V2_EIBRS ||
1102 mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1103 mode == SPECTRE_V2_EIBRS_LFENCE;
1104 }
1105
1106 static void __init
1107 spectre_v2_user_select_mitigation(void)
1108 {
1109 enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1110 bool smt_possible = IS_ENABLED(CONFIG_SMP);
1111 enum spectre_v2_user_cmd cmd;
1112
1113 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1114 return;
1115
1116 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1117 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1118 smt_possible = false;
1119
1120 cmd = spectre_v2_parse_user_cmdline();
1121 switch (cmd) {
1122 case SPECTRE_V2_USER_CMD_NONE:
1123 goto set_mode;
1124 case SPECTRE_V2_USER_CMD_FORCE:
1125 mode = SPECTRE_V2_USER_STRICT;
1126 break;
1127 case SPECTRE_V2_USER_CMD_AUTO:
1128 case SPECTRE_V2_USER_CMD_PRCTL:
1129 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1130 mode = SPECTRE_V2_USER_PRCTL;
1131 break;
1132 case SPECTRE_V2_USER_CMD_SECCOMP:
1133 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1134 if (IS_ENABLED(CONFIG_SECCOMP))
1135 mode = SPECTRE_V2_USER_SECCOMP;
1136 else
1137 mode = SPECTRE_V2_USER_PRCTL;
1138 break;
1139 }
1140
1141
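/* Initialize Indirect Branch Prediction Barrier */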
1142 if (boot_cpu_has(X86_FEATURE_IBPB)) {
1143 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1144
1145 spectre_v2_user_ibpb = mode;
1146 switch (cmd) {
1147 case SPECTRE_V2_USER_CMD_FORCE:
1148 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1149 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1150 static_branch_enable(&switch_mm_always_ibpb);
1151 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1152 break;
1153 case SPECTRE_V2_USER_CMD_PRCTL:
1154 case SPECTRE_V2_USER_CMD_AUTO:
1155 case SPECTRE_V2_USER_CMD_SECCOMP:
1156 static_branch_enable(&switch_mm_cond_ibpb);
1157 break;
1158 default:
1159 break;
1160 }
1161
1162 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1163 static_key_enabled(&switch_mm_always_ibpb) ?
1164 "always-on" : "conditional");
1165 }
1166
1167
1168
1169
1170
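/*
 * STIBP is not required if the CPU lacks it, SMT is not possible, or
 * IBRS / enhanced IBRS already protects against cross-thread branch
 * injection.
 */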
1171 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1172 !smt_possible ||
1173 spectre_v2_in_ibrs_mode(spectre_v2_enabled))
1174 return;
1175
1176
1177
1178
1179
1180
1181 if (mode != SPECTRE_V2_USER_STRICT &&
1182 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1183 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1184
1185 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1186 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
1187 if (mode != SPECTRE_V2_USER_STRICT &&
1188 mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1189 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1190 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1191 }
1192
1193 spectre_v2_user_stibp = mode;
1194
1195 set_mode:
1196 pr_info("%s\n", spectre_v2_user_strings[mode]);
1197 }
1198
1199 static const char * const spectre_v2_strings[] = {
1200 [SPECTRE_V2_NONE] = "Vulnerable",
1201 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
1202 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
1203 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
1204 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
1205 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
1206 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
1207 };
1208
1209 static const struct {
1210 const char *option;
1211 enum spectre_v2_mitigation_cmd cmd;
1212 bool secure;
1213 } mitigation_options[] __initconst = {
1214 { "off", SPECTRE_V2_CMD_NONE, false },
1215 { "on", SPECTRE_V2_CMD_FORCE, true },
1216 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
1217 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1218 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1219 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1220 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
1221 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
1222 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
1223 { "auto", SPECTRE_V2_CMD_AUTO, false },
1224 { "ibrs", SPECTRE_V2_CMD_IBRS, false },
1225 };
1226
1227 static void __init spec_v2_print_cond(const char *reason, bool secure)
1228 {
1229 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1230 pr_info("%s selected on command line.\n", reason);
1231 }
1232
1233 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1234 {
1235 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1236 char arg[20];
1237 int ret, i;
1238
1239 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1240 cpu_mitigations_off())
1241 return SPECTRE_V2_CMD_NONE;
1242
1243 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1244 if (ret < 0)
1245 return SPECTRE_V2_CMD_AUTO;
1246
1247 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1248 if (!match_option(arg, ret, mitigation_options[i].option))
1249 continue;
1250 cmd = mitigation_options[i].cmd;
1251 break;
1252 }
1253
1254 if (i >= ARRAY_SIZE(mitigation_options)) {
1255 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1256 return SPECTRE_V2_CMD_AUTO;
1257 }
1258
1259 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1260 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1261 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1262 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1263 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1264 !IS_ENABLED(CONFIG_RETPOLINE)) {
1265 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1266 mitigation_options[i].option);
1267 return SPECTRE_V2_CMD_AUTO;
1268 }
1269
1270 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1271 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1272 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1273 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1274 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
1275 mitigation_options[i].option);
1276 return SPECTRE_V2_CMD_AUTO;
1277 }
1278
1279 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1280 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1281 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1282 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1283 mitigation_options[i].option);
1284 return SPECTRE_V2_CMD_AUTO;
1285 }
1286
1287 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
1288 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1289 mitigation_options[i].option);
1290 return SPECTRE_V2_CMD_AUTO;
1291 }
1292
1293 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1294 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1295 mitigation_options[i].option);
1296 return SPECTRE_V2_CMD_AUTO;
1297 }
1298
1299 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1300 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1301 mitigation_options[i].option);
1302 return SPECTRE_V2_CMD_AUTO;
1303 }
1304
1305 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
1306 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1307 mitigation_options[i].option);
1308 return SPECTRE_V2_CMD_AUTO;
1309 }
1310
1311 spec_v2_print_cond(mitigation_options[i].option,
1312 mitigation_options[i].secure);
1313 return cmd;
1314 }
1315
1316 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1317 {
1318 if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1319 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1320 return SPECTRE_V2_NONE;
1321 }
1322
1323 return SPECTRE_V2_RETPOLINE;
1324 }
1325
1326
1327 static void __init spec_ctrl_disable_kernel_rrsba(void)
1328 {
1329 u64 ia32_cap;
1330
1331 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1332 return;
1333
1334 ia32_cap = x86_read_arch_cap_msr();
1335
1336 if (ia32_cap & ARCH_CAP_RRSBA) {
1337 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1338 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1339 }
1340 }
1341
1342 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1343 {
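/*
 * A guest can poison RSB entries that survive VMEXIT. With eIBRS only
 * the lite sequence is needed, and only on parts affected by PBRSB;
 * retpoline, LFENCE and IBRS modes require a full RSB fill on VMEXIT.
 */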
1364 switch (mode) {
1365 case SPECTRE_V2_NONE:
1366 return;
1367
1368 case SPECTRE_V2_EIBRS_LFENCE:
1369 case SPECTRE_V2_EIBRS:
1370 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1371 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1372 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1373 }
1374 return;
1375
1376 case SPECTRE_V2_EIBRS_RETPOLINE:
1377 case SPECTRE_V2_RETPOLINE:
1378 case SPECTRE_V2_LFENCE:
1379 case SPECTRE_V2_IBRS:
1380 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1381 pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
1382 return;
1383 }
1384
1385 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
1386 dump_stack();
1387 }
1388
1389 static void __init spectre_v2_select_mitigation(void)
1390 {
1391 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1392 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1393
1394
1395
1396
1397
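/*
 * If the CPU is not affected and the command line did not force a
 * mitigation, there is nothing to do.
 */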
1398 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1399 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1400 return;
1401
1402 switch (cmd) {
1403 case SPECTRE_V2_CMD_NONE:
1404 return;
1405
1406 case SPECTRE_V2_CMD_FORCE:
1407 case SPECTRE_V2_CMD_AUTO:
1408 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1409 mode = SPECTRE_V2_EIBRS;
1410 break;
1411 }
1412
1413 if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
1414 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1415 retbleed_cmd != RETBLEED_CMD_OFF &&
1416 boot_cpu_has(X86_FEATURE_IBRS) &&
1417 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1418 mode = SPECTRE_V2_IBRS;
1419 break;
1420 }
1421
1422 mode = spectre_v2_select_retpoline();
1423 break;
1424
1425 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1426 pr_err(SPECTRE_V2_LFENCE_MSG);
1427 mode = SPECTRE_V2_LFENCE;
1428 break;
1429
1430 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1431 mode = SPECTRE_V2_RETPOLINE;
1432 break;
1433
1434 case SPECTRE_V2_CMD_RETPOLINE:
1435 mode = spectre_v2_select_retpoline();
1436 break;
1437
1438 case SPECTRE_V2_CMD_IBRS:
1439 mode = SPECTRE_V2_IBRS;
1440 break;
1441
1442 case SPECTRE_V2_CMD_EIBRS:
1443 mode = SPECTRE_V2_EIBRS;
1444 break;
1445
1446 case SPECTRE_V2_CMD_EIBRS_LFENCE:
1447 mode = SPECTRE_V2_EIBRS_LFENCE;
1448 break;
1449
1450 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1451 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1452 break;
1453 }
1454
1455 if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1456 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1457
1458 if (spectre_v2_in_ibrs_mode(mode)) {
1459 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1460 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1461 }
1462
1463 switch (mode) {
1464 case SPECTRE_V2_NONE:
1465 case SPECTRE_V2_EIBRS:
1466 break;
1467
1468 case SPECTRE_V2_IBRS:
1469 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1470 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1471 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1472 break;
1473
1474 case SPECTRE_V2_LFENCE:
1475 case SPECTRE_V2_EIBRS_LFENCE:
1476 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1477 fallthrough;
1478
1479 case SPECTRE_V2_RETPOLINE:
1480 case SPECTRE_V2_EIBRS_RETPOLINE:
1481 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1482 break;
1483 }
1484
1485
1486
1487
1488
1489
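/*
 * When indirect branches are protected by retpolines or LFENCE, RETs can
 * still be predicted by the alternate RSB (RRSBA) predictor, so disable
 * that behavior in the kernel on CPUs that provide RRSBA_CTRL.
 */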
1490 if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1491 mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1492 mode == SPECTRE_V2_RETPOLINE)
1493 spec_ctrl_disable_kernel_rrsba();
1494
1495 spectre_v2_enabled = mode;
1496 pr_info("%s\n", spectre_v2_strings[mode]);
1497
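/*
 * Independent of the selected mode, fill the RSB on context switch so
 * that neither poisoned user space entries nor RSB underflow can
 * influence kernel execution after the switch.
 */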
1536 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1537 pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1538
1539 spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
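/*
 * Retpolines do not protect firmware calls. On retbleed-affected
 * AMD/Hygon parts with IBPB, issue an IBPB before firmware calls unless
 * entry IBPB already covers it (retbleed=ibpb); otherwise enable IBRS
 * around firmware calls when not already running with kernel IBRS.
 */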
1552 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1553 boot_cpu_has(X86_FEATURE_IBPB) &&
1554 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1555 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
1556
1557 if (retbleed_cmd != RETBLEED_CMD_IBPB) {
1558 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
1559 pr_info("Enabling Speculation Barrier for firmware calls\n");
1560 }
1561
1562 } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1563 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1564 pr_info("Enabling Restricted Speculation for firmware calls\n");
1565 }
1566
1567
1568 spectre_v2_cmd = cmd;
1569 }
1570
1571 static void update_stibp_msr(void * __unused)
1572 {
1573 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1574 write_spec_ctrl_current(val, true);
1575 }
1576
1577
1578 static void update_stibp_strict(void)
1579 {
1580 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1581
1582 if (sched_smt_active())
1583 mask |= SPEC_CTRL_STIBP;
1584
1585 if (mask == x86_spec_ctrl_base)
1586 return;
1587
1588 pr_info("Update user space SMT mitigation: STIBP %s\n",
1589 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1590 x86_spec_ctrl_base = mask;
1591 on_each_cpu(update_stibp_msr, NULL, 1);
1592 }
1593
1594
1595 static void update_indir_branch_cond(void)
1596 {
1597 if (sched_smt_active())
1598 static_branch_enable(&switch_to_cond_stibp);
1599 else
1600 static_branch_disable(&switch_to_cond_stibp);
1601 }
1602
1603 #undef pr_fmt
1604 #define pr_fmt(fmt) fmt
1605
1606
1607 static void update_mds_branch_idle(void)
1608 {
1609 u64 ia32_cap = x86_read_arch_cap_msr();
1610
1611
1612
1613
1614
1615
1616
1617
1618
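/*
 * Idle buffer clearing only helps CPUs affected solely by MSBDS; the
 * other MDS variants cannot be mitigated while SMT is enabled anyway.
 */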
1619 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1620 return;
1621
1622 if (sched_smt_active()) {
1623 static_branch_enable(&mds_idle_clear);
1624 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1625 (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1626 static_branch_disable(&mds_idle_clear);
1627 }
1628 }
1629
1630 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1631 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1632 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1633
1634 void cpu_bugs_smt_update(void)
1635 {
1636 mutex_lock(&spec_ctrl_mutex);
1637
1638 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1639 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1640 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1641
1642 switch (spectre_v2_user_stibp) {
1643 case SPECTRE_V2_USER_NONE:
1644 break;
1645 case SPECTRE_V2_USER_STRICT:
1646 case SPECTRE_V2_USER_STRICT_PREFERRED:
1647 update_stibp_strict();
1648 break;
1649 case SPECTRE_V2_USER_PRCTL:
1650 case SPECTRE_V2_USER_SECCOMP:
1651 update_indir_branch_cond();
1652 break;
1653 }
1654
1655 switch (mds_mitigation) {
1656 case MDS_MITIGATION_FULL:
1657 case MDS_MITIGATION_VMWERV:
1658 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1659 pr_warn_once(MDS_MSG_SMT);
1660 update_mds_branch_idle();
1661 break;
1662 case MDS_MITIGATION_OFF:
1663 break;
1664 }
1665
1666 switch (taa_mitigation) {
1667 case TAA_MITIGATION_VERW:
1668 case TAA_MITIGATION_UCODE_NEEDED:
1669 if (sched_smt_active())
1670 pr_warn_once(TAA_MSG_SMT);
1671 break;
1672 case TAA_MITIGATION_TSX_DISABLED:
1673 case TAA_MITIGATION_OFF:
1674 break;
1675 }
1676
1677 switch (mmio_mitigation) {
1678 case MMIO_MITIGATION_VERW:
1679 case MMIO_MITIGATION_UCODE_NEEDED:
1680 if (sched_smt_active())
1681 pr_warn_once(MMIO_MSG_SMT);
1682 break;
1683 case MMIO_MITIGATION_OFF:
1684 break;
1685 }
1686
1687 mutex_unlock(&spec_ctrl_mutex);
1688 }
1689
1690 #undef pr_fmt
1691 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1692
1693 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1694
1695
1696 enum ssb_mitigation_cmd {
1697 SPEC_STORE_BYPASS_CMD_NONE,
1698 SPEC_STORE_BYPASS_CMD_AUTO,
1699 SPEC_STORE_BYPASS_CMD_ON,
1700 SPEC_STORE_BYPASS_CMD_PRCTL,
1701 SPEC_STORE_BYPASS_CMD_SECCOMP,
1702 };
1703
1704 static const char * const ssb_strings[] = {
1705 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1706 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1707 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1708 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1709 };
1710
1711 static const struct {
1712 const char *option;
1713 enum ssb_mitigation_cmd cmd;
1714 } ssb_mitigation_options[] __initconst = {
1715 { "auto", SPEC_STORE_BYPASS_CMD_AUTO },
1716 { "on", SPEC_STORE_BYPASS_CMD_ON },
1717 { "off", SPEC_STORE_BYPASS_CMD_NONE },
1718 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL },
1719 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP },
1720 };
1721
1722 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1723 {
1724 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1725 char arg[20];
1726 int ret, i;
1727
1728 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1729 cpu_mitigations_off()) {
1730 return SPEC_STORE_BYPASS_CMD_NONE;
1731 } else {
1732 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1733 arg, sizeof(arg));
1734 if (ret < 0)
1735 return SPEC_STORE_BYPASS_CMD_AUTO;
1736
1737 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1738 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1739 continue;
1740
1741 cmd = ssb_mitigation_options[i].cmd;
1742 break;
1743 }
1744
1745 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1746 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1747 return SPEC_STORE_BYPASS_CMD_AUTO;
1748 }
1749 }
1750
1751 return cmd;
1752 }
1753
1754 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1755 {
1756 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1757 enum ssb_mitigation_cmd cmd;
1758
1759 if (!boot_cpu_has(X86_FEATURE_SSBD))
1760 return mode;
1761
1762 cmd = ssb_parse_cmdline();
1763 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1764 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1765 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1766 return mode;
1767
1768 switch (cmd) {
1769 case SPEC_STORE_BYPASS_CMD_SECCOMP:
1770
1771
1772
1773
1774 if (IS_ENABLED(CONFIG_SECCOMP))
1775 mode = SPEC_STORE_BYPASS_SECCOMP;
1776 else
1777 mode = SPEC_STORE_BYPASS_PRCTL;
1778 break;
1779 case SPEC_STORE_BYPASS_CMD_ON:
1780 mode = SPEC_STORE_BYPASS_DISABLE;
1781 break;
1782 case SPEC_STORE_BYPASS_CMD_AUTO:
1783 case SPEC_STORE_BYPASS_CMD_PRCTL:
1784 mode = SPEC_STORE_BYPASS_PRCTL;
1785 break;
1786 case SPEC_STORE_BYPASS_CMD_NONE:
1787 break;
1788 }
1789
1790
1791
1792
1793
1794
1795
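/*
 * At this point X86_FEATURE_SSBD is known to be present; if the bypass
 * is to be disabled globally, record that with
 * X86_FEATURE_SPEC_STORE_BYPASS_DISABLE and program the proper MSR.
 */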
1796 if (mode == SPEC_STORE_BYPASS_DISABLE) {
1797 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1798
1799
1800
1801
1802 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1803 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1804 x86_amd_ssb_disable();
1805 } else {
1806 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1807 write_spec_ctrl_current(x86_spec_ctrl_base, true);
1808 }
1809 }
1810
1811 return mode;
1812 }
1813
1814 static void ssb_select_mitigation(void)
1815 {
1816 ssb_mode = __ssb_select_mitigation();
1817
1818 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1819 pr_info("%s\n", ssb_strings[ssb_mode]);
1820 }
1821
1822 #undef pr_fmt
1823 #define pr_fmt(fmt) "Speculation prctl: " fmt
1824
1825 static void task_update_spec_tif(struct task_struct *tsk)
1826 {
1827
1828 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1829
1830
1831
1832
1833
1834
1835
1836
1837
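/*
 * The MSRs are updated immediately for the current task; any other task
 * picks up the new TIF bits the next time it is scheduled in.
 */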
1838 if (tsk == current)
1839 speculation_ctrl_update_current();
1840 }
1841
1842 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1843 {
1844
1845 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1846 return -EPERM;
1847
1848 switch (ctrl) {
1849 case PR_SPEC_ENABLE:
1850 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1851 return 0;
1852 case PR_SPEC_DISABLE:
1853 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1854 return 0;
1855 default:
1856 return -ERANGE;
1857 }
1858 }
1859
1860 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1861 {
1862 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1863 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1864 return -ENXIO;
1865
1866 switch (ctrl) {
1867 case PR_SPEC_ENABLE:
1868
1869 if (task_spec_ssb_force_disable(task))
1870 return -EPERM;
1871 task_clear_spec_ssb_disable(task);
1872 task_clear_spec_ssb_noexec(task);
1873 task_update_spec_tif(task);
1874 break;
1875 case PR_SPEC_DISABLE:
1876 task_set_spec_ssb_disable(task);
1877 task_clear_spec_ssb_noexec(task);
1878 task_update_spec_tif(task);
1879 break;
1880 case PR_SPEC_FORCE_DISABLE:
1881 task_set_spec_ssb_disable(task);
1882 task_set_spec_ssb_force_disable(task);
1883 task_clear_spec_ssb_noexec(task);
1884 task_update_spec_tif(task);
1885 break;
1886 case PR_SPEC_DISABLE_NOEXEC:
1887 if (task_spec_ssb_force_disable(task))
1888 return -EPERM;
1889 task_set_spec_ssb_disable(task);
1890 task_set_spec_ssb_noexec(task);
1891 task_update_spec_tif(task);
1892 break;
1893 default:
1894 return -ERANGE;
1895 }
1896 return 0;
1897 }
1898
1899 static bool is_spec_ib_user_controlled(void)
1900 {
1901 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1902 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1903 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1904 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
1905 }
1906
1907 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1908 {
1909 switch (ctrl) {
1910 case PR_SPEC_ENABLE:
1911 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1912 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1913 return 0;
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930 if (!is_spec_ib_user_controlled() ||
1931 task_spec_ib_force_disable(task))
1932 return -EPERM;
1933
1934 task_clear_spec_ib_disable(task);
1935 task_update_spec_tif(task);
1936 break;
1937 case PR_SPEC_DISABLE:
1938 case PR_SPEC_FORCE_DISABLE:
1939
1940
1941
1942
1943 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1944 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1945 return -EPERM;
1946
1947 if (!is_spec_ib_user_controlled())
1948 return 0;
1949
1950 task_set_spec_ib_disable(task);
1951 if (ctrl == PR_SPEC_FORCE_DISABLE)
1952 task_set_spec_ib_force_disable(task);
1953 task_update_spec_tif(task);
1954 break;
1955 default:
1956 return -ERANGE;
1957 }
1958 return 0;
1959 }
1960
1961 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1962 unsigned long ctrl)
1963 {
1964 switch (which) {
1965 case PR_SPEC_STORE_BYPASS:
1966 return ssb_prctl_set(task, ctrl);
1967 case PR_SPEC_INDIRECT_BRANCH:
1968 return ib_prctl_set(task, ctrl);
1969 case PR_SPEC_L1D_FLUSH:
1970 return l1d_flush_prctl_set(task, ctrl);
1971 default:
1972 return -ENODEV;
1973 }
1974 }
1975
1976 #ifdef CONFIG_SECCOMP
1977 void arch_seccomp_spec_mitigate(struct task_struct *task)
1978 {
1979 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1980 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1981 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1982 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1983 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1984 }
1985 #endif
1986
1987 static int l1d_flush_prctl_get(struct task_struct *task)
1988 {
1989 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1990 return PR_SPEC_FORCE_DISABLE;
1991
1992 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
1993 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1994 else
1995 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1996 }
1997
1998 static int ssb_prctl_get(struct task_struct *task)
1999 {
2000 switch (ssb_mode) {
2001 case SPEC_STORE_BYPASS_DISABLE:
2002 return PR_SPEC_DISABLE;
2003 case SPEC_STORE_BYPASS_SECCOMP:
2004 case SPEC_STORE_BYPASS_PRCTL:
2005 if (task_spec_ssb_force_disable(task))
2006 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2007 if (task_spec_ssb_noexec(task))
2008 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2009 if (task_spec_ssb_disable(task))
2010 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2011 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2012 default:
2013 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2014 return PR_SPEC_ENABLE;
2015 return PR_SPEC_NOT_AFFECTED;
2016 }
2017 }
2018
2019 static int ib_prctl_get(struct task_struct *task)
2020 {
2021 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2022 return PR_SPEC_NOT_AFFECTED;
2023
2024 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2025 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2026 return PR_SPEC_ENABLE;
2027 else if (is_spec_ib_user_controlled()) {
2028 if (task_spec_ib_force_disable(task))
2029 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2030 if (task_spec_ib_disable(task))
2031 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2032 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2033 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2034 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2035 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2036 return PR_SPEC_DISABLE;
2037 else
2038 return PR_SPEC_NOT_AFFECTED;
2039 }
2040
2041 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2042 {
2043 switch (which) {
2044 case PR_SPEC_STORE_BYPASS:
2045 return ssb_prctl_get(task);
2046 case PR_SPEC_INDIRECT_BRANCH:
2047 return ib_prctl_get(task);
2048 case PR_SPEC_L1D_FLUSH:
2049 return l1d_flush_prctl_get(task);
2050 default:
2051 return -ENODEV;
2052 }
2053 }
2054
2055 void x86_spec_ctrl_setup_ap(void)
2056 {
2057 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2058 write_spec_ctrl_current(x86_spec_ctrl_base, true);
2059
2060 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2061 x86_amd_ssb_disable();
2062 }
2063
2064 bool itlb_multihit_kvm_mitigation;
2065 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2066
2067 #undef pr_fmt
2068 #define pr_fmt(fmt) "L1TF: " fmt
2069
2070
2071 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
2072 #if IS_ENABLED(CONFIG_KVM_INTEL)
2073 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2074 #endif
2075 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2076 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2077
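/*
 * These Intel CPUs use 44 physical address bits in the L1D cache even if
 * CPUID reports fewer. The L1TF PTE inversion mitigation depends on the
 * real cache width, so raise x86_cache_bits to at least 44 here.
 */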
2092 static void override_cache_bits(struct cpuinfo_x86 *c)
2093 {
2094 if (c->x86 != 6)
2095 return;
2096
2097 switch (c->x86_model) {
2098 case INTEL_FAM6_NEHALEM:
2099 case INTEL_FAM6_WESTMERE:
2100 case INTEL_FAM6_SANDYBRIDGE:
2101 case INTEL_FAM6_IVYBRIDGE:
2102 case INTEL_FAM6_HASWELL:
2103 case INTEL_FAM6_HASWELL_L:
2104 case INTEL_FAM6_HASWELL_G:
2105 case INTEL_FAM6_BROADWELL:
2106 case INTEL_FAM6_BROADWELL_G:
2107 case INTEL_FAM6_SKYLAKE_L:
2108 case INTEL_FAM6_SKYLAKE:
2109 case INTEL_FAM6_KABYLAKE_L:
2110 case INTEL_FAM6_KABYLAKE:
2111 if (c->x86_cache_bits < 44)
2112 c->x86_cache_bits = 44;
2113 break;
2114 }
2115 }
2116
2117 static void __init l1tf_select_mitigation(void)
2118 {
2119 u64 half_pa;
2120
2121 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2122 return;
2123
2124 if (cpu_mitigations_off())
2125 l1tf_mitigation = L1TF_MITIGATION_OFF;
2126 else if (cpu_mitigations_auto_nosmt())
2127 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2128
2129 override_cache_bits(&boot_cpu_data);
2130
2131 switch (l1tf_mitigation) {
2132 case L1TF_MITIGATION_OFF:
2133 case L1TF_MITIGATION_FLUSH_NOWARN:
2134 case L1TF_MITIGATION_FLUSH:
2135 break;
2136 case L1TF_MITIGATION_FLUSH_NOSMT:
2137 case L1TF_MITIGATION_FULL:
2138 cpu_smt_disable(false);
2139 break;
2140 case L1TF_MITIGATION_FULL_FORCE:
2141 cpu_smt_disable(true);
2142 break;
2143 }
2144
2145 #if CONFIG_PGTABLE_LEVELS == 2
2146 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2147 return;
2148 #endif
2149
2150 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2151 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2152 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2153 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2154 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2155 half_pa);
2156 pr_info("However, doing so will make a part of your RAM unusable.\n");
2157 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2158 return;
2159 }
2160
2161 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2162 }
2163
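/*
 * Parse the "l1tf=" early parameter; the accepted values mirror the enum
 * above: off, flush,nowarn, flush, flush,nosmt, full and full,force.
 */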
2164 static int __init l1tf_cmdline(char *str)
2165 {
2166 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2167 return 0;
2168
2169 if (!str)
2170 return -EINVAL;
2171
2172 if (!strcmp(str, "off"))
2173 l1tf_mitigation = L1TF_MITIGATION_OFF;
2174 else if (!strcmp(str, "flush,nowarn"))
2175 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2176 else if (!strcmp(str, "flush"))
2177 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2178 else if (!strcmp(str, "flush,nosmt"))
2179 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2180 else if (!strcmp(str, "full"))
2181 l1tf_mitigation = L1TF_MITIGATION_FULL;
2182 else if (!strcmp(str, "full,force"))
2183 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2184
2185 return 0;
2186 }
2187 early_param("l1tf", l1tf_cmdline);
2188
2189 #undef pr_fmt
2190 #define pr_fmt(fmt) fmt
2191
2192 #ifdef CONFIG_SYSFS
2193
2194 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2195
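/*
 * When KVM/VMX support is built in, the sysfs string also reflects the
 * state of the L1D flush performed on VMENTER; otherwise only PTE
 * inversion is reported.
 */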
2196 #if IS_ENABLED(CONFIG_KVM_INTEL)
2197 static const char * const l1tf_vmx_states[] = {
2198 [VMENTER_L1D_FLUSH_AUTO] = "auto",
2199 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
2200 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
2201 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
2202 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
2203 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
2204 };
2205
2206 static ssize_t l1tf_show_state(char *buf)
2207 {
2208 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2209 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2210
2211 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2212 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2213 sched_smt_active())) {
2214 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2215 l1tf_vmx_states[l1tf_vmx_mitigation]);
2216 }
2217
2218 return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2219 l1tf_vmx_states[l1tf_vmx_mitigation],
2220 sched_smt_active() ? "vulnerable" : "disabled");
2221 }
2222
2223 static ssize_t itlb_multihit_show_state(char *buf)
2224 {
2225 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2226 !boot_cpu_has(X86_FEATURE_VMX))
2227 return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
2228 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2229 return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
2230 else if (itlb_multihit_kvm_mitigation)
2231 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
2232 else
2233 return sprintf(buf, "KVM: Vulnerable\n");
2234 }
2235 #else
2236 static ssize_t l1tf_show_state(char *buf)
2237 {
2238 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2239 }
2240
2241 static ssize_t itlb_multihit_show_state(char *buf)
2242 {
2243 return sprintf(buf, "Processor vulnerable\n");
2244 }
2245 #endif
2246
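/*
 * MDS sysfs reporting: under a hypervisor the host's SMT state is unknown,
 * and CPUs affected only by MSBDS report SMT as "mitigated" rather than
 * "vulnerable" while the mitigation is enabled.
 */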
2247 static ssize_t mds_show_state(char *buf)
2248 {
2249 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2250 return sprintf(buf, "%s; SMT Host state unknown\n",
2251 mds_strings[mds_mitigation]);
2252 }
2253
2254 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2255 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2256 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2257 sched_smt_active() ? "mitigated" : "disabled"));
2258 }
2259
2260 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2261 sched_smt_active() ? "vulnerable" : "disabled");
2262 }
2263
2264 static ssize_t tsx_async_abort_show_state(char *buf)
2265 {
2266 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2267 (taa_mitigation == TAA_MITIGATION_OFF))
2268 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
2269
2270 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2271 return sprintf(buf, "%s; SMT Host state unknown\n",
2272 taa_strings[taa_mitigation]);
2273 }
2274
2275 return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2276 sched_smt_active() ? "vulnerable" : "disabled");
2277 }
2278
2279 static ssize_t mmio_stale_data_show_state(char *buf)
2280 {
2281 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2282 return sysfs_emit(buf, "Unknown: No mitigations\n");
2283
2284 if (mmio_mitigation == MMIO_MITIGATION_OFF)
2285 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2286
2287 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2288 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2289 mmio_strings[mmio_mitigation]);
2290 }
2291
2292 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2293 sched_smt_active() ? "vulnerable" : "disabled");
2294 }
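/*
 * Helpers assembling the Spectre v2 sysfs string: STIBP and IBPB state for
 * user space protection, plus the PBRSB-eIBRS status on affected parts.
 */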
2295
2296 static char *stibp_state(void)
2297 {
2298 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
2299 return "";
2300
2301 switch (spectre_v2_user_stibp) {
2302 case SPECTRE_V2_USER_NONE:
2303 return ", STIBP: disabled";
2304 case SPECTRE_V2_USER_STRICT:
2305 return ", STIBP: forced";
2306 case SPECTRE_V2_USER_STRICT_PREFERRED:
2307 return ", STIBP: always-on";
2308 case SPECTRE_V2_USER_PRCTL:
2309 case SPECTRE_V2_USER_SECCOMP:
2310 if (static_key_enabled(&switch_to_cond_stibp))
2311 return ", STIBP: conditional";
2312 }
2313 return "";
2314 }
2315
2316 static char *ibpb_state(void)
2317 {
2318 if (boot_cpu_has(X86_FEATURE_IBPB)) {
2319 if (static_key_enabled(&switch_mm_always_ibpb))
2320 return ", IBPB: always-on";
2321 if (static_key_enabled(&switch_mm_cond_ibpb))
2322 return ", IBPB: conditional";
2323 return ", IBPB: disabled";
2324 }
2325 return "";
2326 }
2327
2328 static char *pbrsb_eibrs_state(void)
2329 {
2330 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2331 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2332 boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2333 return ", PBRSB-eIBRS: SW sequence";
2334 else
2335 return ", PBRSB-eIBRS: Vulnerable";
2336 } else {
2337 return ", PBRSB-eIBRS: Not affected";
2338 }
2339 }
2340
2341 static ssize_t spectre_v2_show_state(char *buf)
2342 {
2343 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2344 return sprintf(buf, "Vulnerable: LFENCE\n");
2345
2346 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2347 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2348
2349 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2350 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2351 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2352
2353 return sprintf(buf, "%s%s%s%s%s%s%s\n",
2354 spectre_v2_strings[spectre_v2_enabled],
2355 ibpb_state(),
2356 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2357 stibp_state(),
2358 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2359 pbrsb_eibrs_state(),
2360 spectre_v2_module_string());
2361 }
2362
2363 static ssize_t srbds_show_state(char *buf)
2364 {
2365 return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
2366 }
2367
2368 static ssize_t retbleed_show_state(char *buf)
2369 {
2370 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2371 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2372 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2373 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2374 return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
2375
2376 return sprintf(buf, "%s; SMT %s\n",
2377 retbleed_strings[retbleed_mitigation],
2378 !sched_smt_active() ? "disabled" :
2379 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2380 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2381 "enabled with STIBP protection" : "vulnerable");
2382 }
2383
2384 return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2385 }
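/*
 * Common backend for the /sys/devices/system/cpu/vulnerabilities/* files:
 * prints "Not affected" when the bug bit is clear, dispatches to the
 * per-bug helpers above and falls back to plain "Vulnerable".
 */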
2386
2387 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2388 char *buf, unsigned int bug)
2389 {
2390 if (!boot_cpu_has_bug(bug))
2391 return sprintf(buf, "Not affected\n");
2392
2393 switch (bug) {
2394 case X86_BUG_CPU_MELTDOWN:
2395 if (boot_cpu_has(X86_FEATURE_PTI))
2396 return sprintf(buf, "Mitigation: PTI\n");
2397
2398 if (hypervisor_is_type(X86_HYPER_XEN_PV))
2399 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2400
2401 break;
2402
2403 case X86_BUG_SPECTRE_V1:
2404 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2405
2406 case X86_BUG_SPECTRE_V2:
2407 return spectre_v2_show_state(buf);
2408
2409 case X86_BUG_SPEC_STORE_BYPASS:
2410 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2411
2412 case X86_BUG_L1TF:
2413 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2414 return l1tf_show_state(buf);
2415 break;
2416
2417 case X86_BUG_MDS:
2418 return mds_show_state(buf);
2419
2420 case X86_BUG_TAA:
2421 return tsx_async_abort_show_state(buf);
2422
2423 case X86_BUG_ITLB_MULTIHIT:
2424 return itlb_multihit_show_state(buf);
2425
2426 case X86_BUG_SRBDS:
2427 return srbds_show_state(buf);
2428
2429 case X86_BUG_MMIO_STALE_DATA:
2430 case X86_BUG_MMIO_UNKNOWN:
2431 return mmio_stale_data_show_state(buf);
2432
2433 case X86_BUG_RETBLEED:
2434 return retbleed_show_state(buf);
2435
2436 default:
2437 break;
2438 }
2439
2440 return sprintf(buf, "Vulnerable\n");
2441 }
2442
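/* Per-file sysfs entry points; all of them funnel into cpu_show_common(). */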
2443 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2444 {
2445 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2446 }
2447
2448 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2449 {
2450 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2451 }
2452
2453 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2454 {
2455 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2456 }
2457
2458 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2459 {
2460 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2461 }
2462
2463 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2464 {
2465 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2466 }
2467
2468 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2469 {
2470 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2471 }
2472
2473 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2474 {
2475 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2476 }
2477
2478 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2479 {
2480 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2481 }
2482
2483 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2484 {
2485 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2486 }
2487
2488 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2489 {
2490 if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2491 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
2492 else
2493 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2494 }
2495
2496 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2497 {
2498 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2499 }
2500 #endif