0001
0002 #include <linux/arm-smccc.h>
0003 #include <linux/kernel.h>
0004 #include <linux/smp.h>
0005
0006 #include <asm/cp15.h>
0007 #include <asm/cputype.h>
0008 #include <asm/proc-fns.h>
0009 #include <asm/spectre.h>
0010 #include <asm/system_misc.h>
0011
0012 #ifdef CONFIG_ARM_PSCI
0013 static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
0014 {
0015 struct arm_smccc_res res;
0016
0017 arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
0018 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
0019
0020 switch ((int)res.a0) {
0021 case SMCCC_RET_SUCCESS:
0022 return SPECTRE_MITIGATED;
0023
0024 case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
0025 return SPECTRE_UNAFFECTED;
0026
0027 default:
0028 return SPECTRE_VULNERABLE;
0029 }
0030 }
0031 #else
/*
 * Without PSCI there is no SMCCC conduit to query or invoke the firmware
 * workaround, so report this CPU as vulnerable.
 */
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
	return SPECTRE_VULNERABLE;
}
0036 #endif
0037
0038 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Per-CPU hook run on exception entry to invalidate branch predictor state. */
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

/*
 * switch_mm variants implemented in assembly elsewhere; the SMC/HVC ones
 * additionally invoke the firmware/hypervisor workaround on context switch.
 */
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
0045
/* Invalidate all branch predictor entries by writing to BPIALL. */
static void harden_branch_predictor_bpiall(void)
{
	write_sysreg(0, BPIALL);
}
0050
/*
 * Write to ICIALLU (invalidate entire I-cache); used on cores where this
 * is the available way to flush predictor state — confirm per-core TRM.
 */
static void harden_branch_predictor_iciallu(void)
{
	write_sysreg(0, ICIALLU);
}
0055
/* Invoke the firmware ARCH_WORKAROUND_1 call via SMC. */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
0060
/* Invoke the hypervisor ARCH_WORKAROUND_1 call via HVC. */
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
0065
0066 static unsigned int spectre_v2_install_workaround(unsigned int method)
0067 {
0068 const char *spectre_v2_method = NULL;
0069 int cpu = smp_processor_id();
0070
0071 if (per_cpu(harden_branch_predictor_fn, cpu))
0072 return SPECTRE_MITIGATED;
0073
0074 switch (method) {
0075 case SPECTRE_V2_METHOD_BPIALL:
0076 per_cpu(harden_branch_predictor_fn, cpu) =
0077 harden_branch_predictor_bpiall;
0078 spectre_v2_method = "BPIALL";
0079 break;
0080
0081 case SPECTRE_V2_METHOD_ICIALLU:
0082 per_cpu(harden_branch_predictor_fn, cpu) =
0083 harden_branch_predictor_iciallu;
0084 spectre_v2_method = "ICIALLU";
0085 break;
0086
0087 case SPECTRE_V2_METHOD_HVC:
0088 per_cpu(harden_branch_predictor_fn, cpu) =
0089 call_hvc_arch_workaround_1;
0090 cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
0091 spectre_v2_method = "hypervisor";
0092 break;
0093
0094 case SPECTRE_V2_METHOD_SMC:
0095 per_cpu(harden_branch_predictor_fn, cpu) =
0096 call_smc_arch_workaround_1;
0097 cpu_do_switch_mm = cpu_v7_smc_switch_mm;
0098 spectre_v2_method = "firmware";
0099 break;
0100 }
0101
0102 if (spectre_v2_method)
0103 pr_info("CPU%u: Spectre v2: using %s workaround\n",
0104 smp_processor_id(), spectre_v2_method);
0105
0106 return SPECTRE_MITIGATED;
0107 }
0108 #else
/*
 * CONFIG_HARDEN_BRANCH_PREDICTOR=n: no workaround is installed; report
 * the system vulnerable (message printed once system-wide).
 */
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
	pr_info_once("Spectre V2: workarounds disabled by configuration\n");

	return SPECTRE_VULNERABLE;
}
0115 #endif
0116
/*
 * Per-CPU Spectre v2 setup: choose a mitigation method based on the CPU
 * part number, install it, and record the resulting state.
 *
 * Note the deliberate case ordering: "default" sits *before* the
 * A57/A72 labels so that unknown non-ARM-implementor parts fall through
 * into the firmware-assisted path, while unknown ARM parts are treated
 * as unaffected.
 */
static void cpu_v7_spectre_v2_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	/* Cores mitigated by invalidating the branch predictor (BPIALL). */
	case ARM_CPU_PART_CORTEX_A8:
	case ARM_CPU_PART_CORTEX_A9:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	/* Cores mitigated via I-cache invalidation (ICIALLU). */
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_ICIALLU;
		break;

	case ARM_CPU_PART_BRAHMA_B53:
		/* Requires no workaround */
		state = SPECTRE_UNAFFECTED;
		break;

	default:
		/* Other ARM CPUs require no workaround */
		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
			state = SPECTRE_UNAFFECTED;
			break;
		}

		fallthrough;

	/* Cortex A57/A72 require firmware workaround */
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72:
		state = spectre_v2_get_cpu_fw_mitigation_state();
		if (state != SPECTRE_MITIGATED)
			break;

		/* Pick the conduit matching how firmware is reached. */
		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			method = SPECTRE_V2_METHOD_HVC;
			break;

		case SMCCC_CONDUIT_SMC:
			method = SPECTRE_V2_METHOD_SMC;
			break;

		default:
			state = SPECTRE_VULNERABLE;
			break;
		}
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_v2_install_workaround(method);

	spectre_v2_update_state(state, method);
}
0179
0180 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
/* System-wide BHB method; 0 until a CPU installs one, then fixed for all. */
static int spectre_bhb_method;
0182
0183 static const char *spectre_bhb_method_name(int method)
0184 {
0185 switch (method) {
0186 case SPECTRE_V2_METHOD_LOOP8:
0187 return "loop";
0188
0189 case SPECTRE_V2_METHOD_BPIALL:
0190 return "BPIALL";
0191
0192 default:
0193 return "unknown";
0194 }
0195 }
0196
0197 static int spectre_bhb_install_workaround(int method)
0198 {
0199 if (spectre_bhb_method != method) {
0200 if (spectre_bhb_method) {
0201 pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
0202 smp_processor_id());
0203
0204 return SPECTRE_VULNERABLE;
0205 }
0206
0207 if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
0208 return SPECTRE_VULNERABLE;
0209
0210 spectre_bhb_method = method;
0211
0212 pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
0213 smp_processor_id(), spectre_bhb_method_name(method));
0214 }
0215
0216 return SPECTRE_MITIGATED;
0217 }
0218 #else
/* CONFIG_HARDEN_BRANCH_HISTORY=n: no BHB workaround; report vulnerable. */
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
0223 #endif
0224
0225 static void cpu_v7_spectre_bhb_init(void)
0226 {
0227 unsigned int state, method = 0;
0228
0229 switch (read_cpuid_part()) {
0230 case ARM_CPU_PART_CORTEX_A15:
0231 case ARM_CPU_PART_BRAHMA_B15:
0232 case ARM_CPU_PART_CORTEX_A57:
0233 case ARM_CPU_PART_CORTEX_A72:
0234 state = SPECTRE_MITIGATED;
0235 method = SPECTRE_V2_METHOD_LOOP8;
0236 break;
0237
0238 case ARM_CPU_PART_CORTEX_A73:
0239 case ARM_CPU_PART_CORTEX_A75:
0240 state = SPECTRE_MITIGATED;
0241 method = SPECTRE_V2_METHOD_BPIALL;
0242 break;
0243
0244 default:
0245 state = SPECTRE_UNAFFECTED;
0246 break;
0247 }
0248
0249 if (state == SPECTRE_MITIGATED)
0250 state = spectre_bhb_install_workaround(method);
0251
0252 spectre_v2_update_state(state, method);
0253 }
0254
0255 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
0256 u32 mask, const char *msg)
0257 {
0258 u32 aux_cr;
0259
0260 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
0261
0262 if ((aux_cr & mask) != mask) {
0263 if (!*warned)
0264 pr_err("CPU%u: %s", smp_processor_id(), msg);
0265 *warned = true;
0266 return false;
0267 }
0268 return true;
0269 }
0270
/* Per-CPU "already warned about missing IBE bit" flag. */
static DEFINE_PER_CPU(bool, spectre_warned);
0272
0273 static bool check_spectre_auxcr(bool *warned, u32 bit)
0274 {
0275 return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
0276 cpu_v7_check_auxcr_set(warned, bit,
0277 "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
0278 }
0279
/* Cortex-A8 bug init: mitigate Spectre v2 only if the IBE bit (6) is set. */
void cpu_v7_ca8_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
		cpu_v7_spectre_v2_init();
}
0285
/*
 * Cortex-A15 bug init: Spectre v2 setup is gated on the IBE bit (0), but
 * the BHB mitigation is installed regardless.
 */
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
0292
/* Generic v7 bug init: run both Spectre v2 and Spectre BHB setup. */
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}