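/*
 * Detection, reporting and mitigation of the arm64 Spectre vulnerabilities
 * (v1, v2, v3a, v4 and Spectre-BHB). Vulnerability state is reported to
 * userspace through the sysfs CPU vulnerability files, and the strongest
 * mitigation available on each CPU is installed during boot.
 */
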
#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

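/*
 * The mitigation state is only ever raised, never lowered: once userspace
 * has been told that a vulnerability is mitigated, a late-onlined CPU must
 * not be allowed to weaken that claim, hence the WARN_ON() once the system
 * capabilities have been finalized.
 */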
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

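/*
 * Spectre v1.
 *
 * The kernel cannot protect userspace from bounds-check bypass; all that is
 * done here is to report the __user pointer sanitization applied within the
 * kernel itself.
 */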
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

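/*
 * Spectre v2.
 *
 * A CPU is either unaffected (advertised via ID_AA64PFR0_EL1.CSV2 or listed
 * in a known-safe MIDR list), mitigated by a firmware call, or mitigated by
 * a CPU-specific sequence in the kernel. Anything else is reported as
 * vulnerable. On big.LITTLE systems, different CPUs may fall into different
 * categories.
 */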
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * A platform affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ }
	};

	/* If the CPU advertises CSV2, it is unaffected. */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, it may be on the list of known-safe CPUs. */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * If we booted at EL1 (e.g. as a guest), there are no hyp vectors
	 * for us to update.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* These callbacks may be invoked from the low-level entry code, so noinstr. */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/*
 * Qualcomm Falkor CPUs: scrub the branch predictor's link stack by issuing a
 * run of dummy calls before restoring the original link register.
 */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround sequence if one exists; firmware
	 * is still relied upon for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

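/*
 * Spectre v3a.
 *
 * Affected CPUs are made to use an indirect trampoline for the hyp vectors,
 * so that a guest cannot use speculative reads of VBAR_EL2 to discover the
 * hypervisor's layout.
 */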
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

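/*
 * Spectre v4 (speculative store bypass).
 *
 * A CPU is either listed in a known-safe MIDR list, mitigated in hardware
 * via PSTATE.SSBS, or mitigated in software by firmware (SSBD). The
 * mitigation can be controlled system-wide via the "ssbd" command-line
 * parameter and per task via prctl(), so it may need to be toggled on
 * kernel entry/exit and at context switch.
 */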
static enum mitigation_state spectre_v4_state;

/* Per-CPU flag: do we need to call into firmware on kernel entry/exit? */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char *str;
	enum spectre_v4_policy policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

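/*
 * The "ssbd" parameter and "mitigations=off" can both influence the policy;
 * the helpers below collapse them into one of three states, "off", "dynamic"
 * or "on", preferring "off" when the options contradict each other.
 */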
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* Otherwise, rely on PSTATE.SSBS if this CPU implements it. */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

/*
 * Emulate a trapped "MSR SSBS, #imm" issued by the kernel: update the saved
 * PSTATE directly and skip the instruction. User-mode traps are left to the
 * normal UNDEF handling (return 1 == not handled).
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask = ~(1U << PSTATE_Imm_shift),
	.instr_val = 0xd500401f | PSTATE_SSBS,
	.fn = ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/*
	 * There is nothing more to set up unless this CPU is mitigated in
	 * hardware via PSTATE.SSBS.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* Leave PSTATE.SSBS clear so that speculative store bypass is disabled. */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

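/*
 * Alternative-patching callback for the Spectre-v4 entry code: a NOP is only
 * written (so that the firmware mitigation hook is reached) when the state
 * has to be toggled dynamically and cannot be left to SSBS.
 */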
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

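/*
 * Patch in an HVC or SMC instruction, depending on the SMCCC conduit, so
 * that the mitigation-state call can reach firmware.
 */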
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

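/*
 * The Spectre-v4 mitigation can be controlled per task via the
 * PR_SPEC_STORE_BYPASS prctl() interface. This only makes a difference when
 * the system-wide policy is "dynamic"; a forced-on or forced-off policy
 * cannot be overridden from userspace.
 */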
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable the mitigation. */

		/* A force-disabled task cannot be re-enabled. */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on system-wide, speculation is
		 * forced off and cannot be re-enabled either.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * If the mitigation is forced off system-wide, speculation is
		 * forced on and we cannot change the state.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable the mitigation. */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/*
		 * Disable speculation until execve(): only possible if the
		 * state isn't forced in either direction.
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task. */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

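/*
 * Spectre BHB (branch history injection).
 *
 * A CPU is either: mitigated by running a CPU-specific loop of branches (the
 * "k" value below), mitigated by the ARCH_WORKAROUND_3 firmware call,
 * mitigated by the ClearBHB instruction, mitigated in hardware because
 * taking an exception clears the branch history (ECBHB), or unaffected
 * because it implements CSV2.3.
 */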
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

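/*
 * This must be called with SCOPE_LOCAL_CPU on each CPU type, so that
 * max_bhb_k is populated, before any SCOPE_SYSTEM call will give the right
 * answer.
 */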
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space, so don't write VBAR_EL1 here.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy loop added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors: the plain vectors don't include the
			 * firmware call, so upgrade the slot if necessary.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call made from the vectors supersedes the
			 * WA1 call made during context-switch. Uninstall any
			 * firmware bp_hardening callback, but keep a
			 * CPU-specific one.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

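/* Patched to NOP when the loop mitigation is enabled. */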
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

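/* Patched to NOP when the firmware mitigation is enabled. */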
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

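/* Patched so that the MOV immediate carries this system's loop count. */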
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1);

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

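/* Patched so that the function-ID immediate is ARM_SMCCC_ARCH_WORKAROUND_3. */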
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1);

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

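/* Patched to NOP when the ClearBHB instruction isn't used. */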
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif