// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
                    enum mitigation_state new)
{
    enum mitigation_state state;

    do {
        state = READ_ONCE(*oldp);
        if (new <= state)
            break;

        /* Userspace almost certainly can't deal with this. */
        if (WARN_ON(system_capabilities_finalized()))
            break;
    } while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                char *buf)
{
    return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
    __nospectre_v2 = true;
    return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
    bool ret = __nospectre_v2 || cpu_mitigations_off();

    if (ret)
        pr_info_once("spectre-v2 mitigation disabled by command line option\n");

    return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
    switch (bhb_state) {
    case SPECTRE_UNAFFECTED:
        return "";
    default:
    case SPECTRE_VULNERABLE:
        return ", but not BHB";
    case SPECTRE_MITIGATED:
        return ", BHB";
    }
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
    return !sysctl_unprivileged_bpf_disabled;
#else
    return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                char *buf)
{
    enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
    const char *bhb_str = get_bhb_affected_string(bhb_state);
    const char *v2_str = "Branch predictor hardening";

    switch (spectre_v2_state) {
    case SPECTRE_UNAFFECTED:
        if (bhb_state == SPECTRE_UNAFFECTED)
            return sprintf(buf, "Not affected\n");

        /*
         * Platforms affected by Spectre-BHB can't report
         * "Not affected" for Spectre-v2.
         */
        v2_str = "CSV2";
        fallthrough;
    case SPECTRE_MITIGATED:
        if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
            return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

        return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
    case SPECTRE_VULNERABLE:
        fallthrough;
    default:
        return sprintf(buf, "Vulnerable\n");
    }
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
    u64 pfr0;
    static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ }
    };

    /* If the CPU has CSV2 set, we're safe */
    pfr0 = read_cpuid(ID_AA64PFR0_EL1);
    if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
        return SPECTRE_UNAFFECTED;

    /* Alternatively, we have a list of unaffected CPUs */
    if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
        return SPECTRE_UNAFFECTED;

    return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
    int ret;
    struct arm_smccc_res res;

    arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                 ARM_SMCCC_ARCH_WORKAROUND_1, &res);

    ret = res.a0;
    switch (ret) {
    case SMCCC_RET_SUCCESS:
        return SPECTRE_MITIGATED;
    case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
        return SPECTRE_UNAFFECTED;
    default:
        fallthrough;
    case SMCCC_RET_NOT_SUPPORTED:
        return SPECTRE_VULNERABLE;
    }
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
    WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

    if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
        return false;

    if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
        return false;

    return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
    return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
    __this_cpu_write(bp_hardening_data.fn, fn);

    /*
     * Vinz Clortho takes the hyp_vecs start/end "keys" at
     * the door when we're a guest. Skip the hyp-vectors work.
     */
    if (!is_hyp_mode_available())
        return;

    __this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
    arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
    u64 tmp;

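    /*
     * Stuff the return-address predictor (link stack) with 16 benign
     * entries: each "bl . + 4" just branches to the following
     * instruction, so only x30 is clobbered and it is saved/restored
     * via the temporary.
     */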
    asm volatile("mov   %0, x30     \n"
             ".rept 16      \n"
             "bl    . + 4       \n"
             ".endr         \n"
             "mov   x30, %0     \n"
             : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
    u32 midr = read_cpuid_id();
    if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
        ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
        return NULL;

    return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
    bp_hardening_cb_t cb;
    enum mitigation_state state;

    state = spectre_v2_get_cpu_fw_mitigation_state();
    if (state != SPECTRE_MITIGATED)
        return state;

    if (spectre_v2_mitigations_off())
        return SPECTRE_VULNERABLE;

    switch (arm_smccc_1_1_get_conduit()) {
    case SMCCC_CONDUIT_HVC:
        cb = call_hvc_arch_workaround_1;
        break;

    case SMCCC_CONDUIT_SMC:
        cb = call_smc_arch_workaround_1;
        break;

    default:
        return SPECTRE_VULNERABLE;
    }

    /*
     * Prefer a CPU-specific workaround if it exists. Note that we
     * still rely on firmware for the mitigation at EL2.
     */
    cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
    install_bp_hardening_cb(cb);
    return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
    enum mitigation_state state;

    WARN_ON(preemptible());

    state = spectre_v2_get_cpu_hw_mitigation_state();
    if (state == SPECTRE_VULNERABLE)
        state = spectre_v2_enable_fw_mitigation();

    update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
    static const struct midr_range spectre_v3a_unsafe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
    };

    WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
    return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
    struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

    if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
        data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
    SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
    SPECTRE_V4_POLICY_MITIGATION_ENABLED,
    SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
    const char      *str;
    enum spectre_v4_policy  policy;
} spectre_v4_params[] = {
    { "force-on",   SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
    { "force-off",  SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
    { "kernel", SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
    int i;

    if (!str || !str[0])
        return -EINVAL;

    for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
        const struct spectre_v4_param *param = &spectre_v4_params[i];

        if (strncmp(str, param->str, strlen(param->str)))
            continue;

        __spectre_v4_policy = param->policy;
        return 0;
    }

    return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
    bool ret = cpu_mitigations_off() ||
           __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

    if (ret)
        pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

    return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
    return !spectre_v4_mitigations_off() &&
           __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
    return !spectre_v4_mitigations_off() &&
           __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                   struct device_attribute *attr, char *buf)
{
    switch (spectre_v4_state) {
    case SPECTRE_UNAFFECTED:
        return sprintf(buf, "Not affected\n");
    case SPECTRE_MITIGATED:
        return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
    case SPECTRE_VULNERABLE:
        fallthrough;
    default:
        return sprintf(buf, "Vulnerable\n");
    }
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
    return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
    static const struct midr_range spectre_v4_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
        MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        { /* sentinel */ },
    };

    if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
        return SPECTRE_UNAFFECTED;

    /* CPU features are detected first */
    if (this_cpu_has_cap(ARM64_SSBS))
        return SPECTRE_MITIGATED;

    return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
    int ret;
    struct arm_smccc_res res;

    arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                 ARM_SMCCC_ARCH_WORKAROUND_2, &res);

    ret = res.a0;
    switch (ret) {
    case SMCCC_RET_SUCCESS:
        return SPECTRE_MITIGATED;
    case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
        fallthrough;
    case SMCCC_RET_NOT_REQUIRED:
        return SPECTRE_UNAFFECTED;
    default:
        fallthrough;
    case SMCCC_RET_NOT_SUPPORTED:
        return SPECTRE_VULNERABLE;
    }
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
    enum mitigation_state state;

    WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

    state = spectre_v4_get_cpu_hw_mitigation_state();
    if (state == SPECTRE_VULNERABLE)
        state = spectre_v4_get_cpu_fw_mitigation_state();

    return state != SPECTRE_UNAFFECTED;
}

static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
    if (user_mode(regs))
        return 1;

    if (instr & BIT(PSTATE_Imm_shift))
        regs->pstate |= PSR_SSBS_BIT;
    else
        regs->pstate &= ~PSR_SSBS_BIT;

    arm64_skip_faulting_instruction(regs, 4);
    return 0;
}

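/*
 * Match MSR (immediate) writes to PSTATE.SSBS: kernel-mode uses are emulated
 * by toggling PSR_SSBS_BIT in the saved pstate, while user-mode faults are
 * left to the normal undefined-instruction handling.
 */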
static struct undef_hook ssbs_emulation_hook = {
    .instr_mask = ~(1U << PSTATE_Imm_shift),
    .instr_val  = 0xd500401f | PSTATE_SSBS,
    .fn     = ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
    static bool undef_hook_registered = false;
    static DEFINE_RAW_SPINLOCK(hook_lock);
    enum mitigation_state state;

    /*
     * If the system is mitigated but this CPU doesn't have SSBS, then
     * we must be on the safelist and there's nothing more to do.
     */
    state = spectre_v4_get_cpu_hw_mitigation_state();
    if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
        return state;

    raw_spin_lock(&hook_lock);
    if (!undef_hook_registered) {
        register_undef_hook(&ssbs_emulation_hook);
        undef_hook_registered = true;
    }
    raw_spin_unlock(&hook_lock);

    if (spectre_v4_mitigations_off()) {
        sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
        set_pstate_ssbs(1);
        return SPECTRE_VULNERABLE;
    }

    /* SCTLR_EL1.DSSBS was initialised to 0 during boot */
    set_pstate_ssbs(0);
    return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
                          __le32 *origptr,
                          __le32 *updptr, int nr_inst)
{
    BUG_ON(nr_inst != 1); /* Branch -> NOP */

    if (spectre_v4_mitigations_off())
        return;

    if (cpus_have_final_cap(ARM64_SSBS))
        return;

    if (spectre_v4_mitigations_dynamic())
        *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
                           __le32 *origptr,
                           __le32 *updptr, int nr_inst)
{
    u32 insn;

    BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

    switch (arm_smccc_1_1_get_conduit()) {
    case SMCCC_CONDUIT_HVC:
        insn = aarch64_insn_get_hvc_value();
        break;
    case SMCCC_CONDUIT_SMC:
        insn = aarch64_insn_get_smc_value();
        break;
    default:
        return;
    }

    *updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
    enum mitigation_state state;

    state = spectre_v4_get_cpu_fw_mitigation_state();
    if (state != SPECTRE_MITIGATED)
        return state;

    if (spectre_v4_mitigations_off()) {
        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
        return SPECTRE_VULNERABLE;
    }

    arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

    if (spectre_v4_mitigations_dynamic())
        __this_cpu_write(arm64_ssbd_callback_required, 1);

    return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
    enum mitigation_state state;

    WARN_ON(preemptible());

    state = spectre_v4_enable_hw_mitigation();
    if (state == SPECTRE_VULNERABLE)
        state = spectre_v4_enable_fw_mitigation();

    update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
    u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

    if (state)
        regs->pstate |= bit;
    else
        regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
    struct pt_regs *regs = task_pt_regs(tsk);
    bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

    if (spectre_v4_mitigations_off())
        ssbs = true;
    else if (spectre_v4_mitigations_dynamic() && !kthread)
        ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

    __update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
    task_clear_spec_ssb_noexec(task);
    task_set_spec_ssb_disable(task);
    set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
    task_clear_spec_ssb_noexec(task);
    task_clear_spec_ssb_disable(task);
    clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
    switch (ctrl) {
    case PR_SPEC_ENABLE:
        /* Enable speculation: disable mitigation */
        /*
         * Force disabled speculation prevents it from being
         * re-enabled.
         */
        if (task_spec_ssb_force_disable(task))
            return -EPERM;

        /*
         * If the mitigation is forced on, then speculation is forced
         * off and we again prevent it from being re-enabled.
         */
        if (spectre_v4_mitigations_on())
            return -EPERM;

        ssbd_prctl_disable_mitigation(task);
        break;
    case PR_SPEC_FORCE_DISABLE:
        /* Force disable speculation: force enable mitigation */
        /*
         * If the mitigation is forced off, then speculation is forced
         * on and we prevent it from being disabled.
         */
        if (spectre_v4_mitigations_off())
            return -EPERM;

        task_set_spec_ssb_force_disable(task);
        fallthrough;
    case PR_SPEC_DISABLE:
        /* Disable speculation: enable mitigation */
        /* Same as PR_SPEC_FORCE_DISABLE */
        if (spectre_v4_mitigations_off())
            return -EPERM;

        ssbd_prctl_enable_mitigation(task);
        break;
    case PR_SPEC_DISABLE_NOEXEC:
        /* Disable speculation until execve(): enable mitigation */
        /*
         * If the mitigation state is forced one way or the other, then
         * we must fail now before we try to toggle it on execve().
         */
        if (task_spec_ssb_force_disable(task) ||
            spectre_v4_mitigations_off() ||
            spectre_v4_mitigations_on()) {
            return -EPERM;
        }

        ssbd_prctl_enable_mitigation(task);
        task_set_spec_ssb_noexec(task);
        break;
    default:
        return -ERANGE;
    }

    spectre_v4_enable_task_mitigation(task);
    return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                 unsigned long ctrl)
{
    switch (which) {
    case PR_SPEC_STORE_BYPASS:
        return ssbd_prctl_set(task, ctrl);
    default:
        return -ENODEV;
    }
}

static int ssbd_prctl_get(struct task_struct *task)
{
    switch (spectre_v4_state) {
    case SPECTRE_UNAFFECTED:
        return PR_SPEC_NOT_AFFECTED;
    case SPECTRE_MITIGATED:
        if (spectre_v4_mitigations_on())
            return PR_SPEC_NOT_AFFECTED;

        if (spectre_v4_mitigations_dynamic())
            break;

        /* Mitigations are disabled, so we're vulnerable. */
        fallthrough;
    case SPECTRE_VULNERABLE:
        fallthrough;
    default:
        return PR_SPEC_ENABLE;
    }

    /* Check the mitigation state for this task */
    if (task_spec_ssb_force_disable(task))
        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

    if (task_spec_ssb_noexec(task))
        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

    if (task_spec_ssb_disable(task))
        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

    return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
    switch (which) {
    case PR_SPEC_STORE_BYPASS:
        return ssbd_prctl_get(task);
    default:
        return -ENODEV;
    }
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
    return spectre_bhb_state;
}

enum bhb_mitigation_bits {
    BHB_LOOP,
    BHB_FW,
    BHB_HW,
    BHB_INSN,
};
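/*
 * Bitmap of the BHB_* mitigations in use on any CPU, consulted by the
 * alternative-patching callbacks below.
 */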
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
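    /* "k" is the number of loop iterations this CPU needs to clear its branch history. */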
    u8 k = 0;
    static u8 max_bhb_k;

    if (scope == SCOPE_LOCAL_CPU) {
        static const struct midr_range spectre_bhb_k32_list[] = {
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
            {},
        };
        static const struct midr_range spectre_bhb_k24_list[] = {
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
            MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
            {},
        };
        static const struct midr_range spectre_bhb_k8_list[] = {
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
            {},
        };

        if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
            k = 32;
        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
            k = 24;
        else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
            k =  8;

        max_bhb_k = max(max_bhb_k, k);
    } else {
        k = max_bhb_k;
    }

    return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
    int ret;
    struct arm_smccc_res res;

    arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                 ARM_SMCCC_ARCH_WORKAROUND_3, &res);

    ret = res.a0;
    switch (ret) {
    case SMCCC_RET_SUCCESS:
        return SPECTRE_MITIGATED;
    case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
        return SPECTRE_UNAFFECTED;
    default:
        fallthrough;
    case SMCCC_RET_NOT_SUPPORTED:
        return SPECTRE_VULNERABLE;
    }
}

static bool is_spectre_bhb_fw_affected(int scope)
{
    static bool system_affected;
    enum mitigation_state fw_state;
    bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
    static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
        {},
    };
    bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
                     spectre_bhb_firmware_mitigated_list);

    if (scope != SCOPE_LOCAL_CPU)
        return system_affected;

    fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
    if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
        system_affected = true;
        return true;
    }

    return false;
}

static bool supports_ecbhb(int scope)
{
    u64 mmfr1;

    if (scope == SCOPE_LOCAL_CPU)
        mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
    else
        mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

    return cpuid_feature_extract_unsigned_field(mmfr1,
                            ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                 int scope)
{
    WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

    if (supports_csv2p3(scope))
        return false;

    if (supports_clearbhb(scope))
        return true;

    if (spectre_bhb_loop_affected(scope))
        return true;

    if (is_spectre_bhb_fw_affected(scope))
        return true;

    return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
    const char *v = arm64_get_bp_hardening_vector(slot);

    if (slot < 0)
        return;

    __this_cpu_write(this_cpu_vector, v);

    /*
     * When KPTI is in use, the vectors are switched when exiting to
     * user-space.
     */
    if (arm64_kernel_unmapped_at_el0())
        return;

    write_sysreg(v, vbar_el1);
    isb();
}

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
    bp_hardening_cb_t cpu_cb;
    enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
    struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

    if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
        return;

    if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
        /* No point mitigating Spectre-BHB alone. */
    } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
        pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
    } else if (cpu_mitigations_off()) {
        pr_info_once("spectre-bhb mitigation disabled by command line option\n");
    } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
        state = SPECTRE_MITIGATED;
        set_bit(BHB_HW, &system_bhb_mitigations);
    } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
        /*
         * Ensure KVM uses the indirect vector which will have ClearBHB
         * added.
         */
        if (!data->slot)
            data->slot = HYP_VECTOR_INDIRECT;

        this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
        state = SPECTRE_MITIGATED;
        set_bit(BHB_INSN, &system_bhb_mitigations);
    } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
        /*
         * Ensure KVM uses the indirect vector which will have the
         * branchy-loop added. A57/A72-r0 will already have selected
         * the spectre-indirect vector, which is sufficient for BHB
         * too.
         */
        if (!data->slot)
            data->slot = HYP_VECTOR_INDIRECT;

        this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
        state = SPECTRE_MITIGATED;
        set_bit(BHB_LOOP, &system_bhb_mitigations);
    } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
        fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
        if (fw_state == SPECTRE_MITIGATED) {
            /*
             * Ensure KVM uses one of the spectre bp_hardening
             * vectors. The indirect vector doesn't include the EL3
             * call, so needs upgrading to
             * HYP_VECTOR_SPECTRE_INDIRECT.
             */
            if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
                data->slot += 1;

            this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

            /*
             * The WA3 call in the vectors supersedes the WA1 call
             * made during context-switch. Uninstall any firmware
             * bp_hardening callback.
             */
            cpu_cb = spectre_v2_get_sw_mitigation_cb();
            if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
                __this_cpu_write(bp_hardening_data.fn, NULL);

            state = SPECTRE_MITIGATED;
            set_bit(BHB_FW, &system_bhb_mitigations);
        }
    }

    update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
                             __le32 *origptr,
                             __le32 *updptr, int nr_inst)
{
    BUG_ON(nr_inst != 1);

    if (test_bit(BHB_LOOP, &system_bhb_mitigations))
        *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
                           __le32 *origptr,
                           __le32 *updptr, int nr_inst)
{
    BUG_ON(nr_inst != 1);

    if (test_bit(BHB_FW, &system_bhb_mitigations))
        *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
                   __le32 *origptr, __le32 *updptr, int nr_inst)
{
    u8 rd;
    u32 insn;
    u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

    BUG_ON(nr_inst != 1); /* MOV -> MOV */

    if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
        return;

    insn = le32_to_cpu(*origptr);
    rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
    insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
                     AARCH64_INSN_VARIANT_64BIT,
                     AARCH64_INSN_MOVEWIDE_ZERO);
    *updptr++ = cpu_to_le32(insn);
}

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
                   __le32 *origptr, __le32 *updptr, int nr_inst)
{
    u8 rd;
    u32 insn;

    BUG_ON(nr_inst != 1); /* MOV -> MOV */

    if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
        !test_bit(BHB_FW, &system_bhb_mitigations))
        return;

    insn = le32_to_cpu(*origptr);
    rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

    insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
                          AARCH64_INSN_VARIANT_32BIT,
                          AARCH64_INSN_REG_ZR, rd,
                          ARM_SMCCC_ARCH_WORKAROUND_3);
    if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
        return;

    *updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
                   __le32 *origptr, __le32 *updptr, int nr_inst)
{
    BUG_ON(nr_inst != 2);

    if (test_bit(BHB_INSN, &system_bhb_mitigations))
        return;

    *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
    *updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
    if (spectre_v2_state == SPECTRE_VULNERABLE ||
        spectre_bhb_state != SPECTRE_MITIGATED)
        return;

    if (!new_state)
        pr_err("WARNING: %s", EBPF_WARN);
}
#endif