0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063 #define pr_fmt(fmt) "CPU features: " fmt
0064
0065 #include <linux/bsearch.h>
0066 #include <linux/cpumask.h>
0067 #include <linux/crash_dump.h>
0068 #include <linux/sort.h>
0069 #include <linux/stop_machine.h>
0070 #include <linux/sysfs.h>
0071 #include <linux/types.h>
0072 #include <linux/minmax.h>
0073 #include <linux/mm.h>
0074 #include <linux/cpu.h>
0075 #include <linux/kasan.h>
0076 #include <linux/percpu.h>
0077
0078 #include <asm/cpu.h>
0079 #include <asm/cpufeature.h>
0080 #include <asm/cpu_ops.h>
0081 #include <asm/fpsimd.h>
0082 #include <asm/hwcap.h>
0083 #include <asm/insn.h>
0084 #include <asm/kvm_host.h>
0085 #include <asm/mmu_context.h>
0086 #include <asm/mte.h>
0087 #include <asm/processor.h>
0088 #include <asm/smp.h>
0089 #include <asm/sysreg.h>
0090 #include <asm/traps.h>
0091 #include <asm/vectors.h>
0092 #include <asm/virt.h>
0093
0094
/* System-wide ELF hwcaps, exposed to userspace via AT_HWCAP/AT_HWCAP2. */
static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;

#ifdef CONFIG_COMPAT
/* Baseline hwcaps assumed for any CPU that can run 32-bit EL0. */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Bitmap of the ARM64_* capabilities detected on the system. */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
/* Per-capability descriptor pointers, indexed by capability number. */
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];

/* Capabilities detected on the boot CPU, used for early code patching. */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

/* Per-CPU exception vector base, switched for spectre-style mitigations. */
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;

/*
 * Permit execution of 32-bit tasks even if not all CPUs support 32-bit
 * EL0 (presumably set via an early command-line parameter — not visible
 * in this chunk).
 */
static bool __read_mostly allow_mismatched_32bit_el0;

/*
 * Static branch enabled only if allow_mismatched_32bit_el0 is set and we
 * have detected at least one CPU capable of 32-bit EL0.
 */
DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

/*
 * Mask of CPUs supporting 32-bit EL0.
 * Only valid if arm64_mismatched_32bit_el0 is enabled.
 */
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
/* Set once system-wide capability detection is complete. */
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

/* Flip the static key marking system-wide capabilities as finalized. */
static inline void finalize_system_capabilities(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

/* Emergency dump of the detected capability bitmap (e.g. on panic). */
void dump_cpu_features(void)
{
	/* file-wide pr_fmt() prepends "CPU features: " */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
}
0158
0159 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
0160 EXPORT_SYMBOL(cpu_hwcap_keys);
0161
/*
 * Describe one field of a CPU feature register: its signedness,
 * visibility to userspace, whether a cross-CPU mismatch is fatal
 * (strict), how a safe system-wide value is derived (type), and the
 * field's position/width plus a fallback safe value.
 */
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.visible = VISIBLE,			\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with an unsigned value */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Sentinel: width == 0 terminates an arm64_ftr_bits table. */
#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}

static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

static bool __system_matches_cap(unsigned int n);
0189
0190
0191
0192
0193
/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documented CPU feature register ABI. Entries must be
 * in descending shift order (checked by sort_ftr_regs()).
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0),
	/* Pointer auth fields are only exposed when the kernel uses them. */
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0),
	/* FTR_EXACT: mixed address-auth algorithms cannot be reconciled. */
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
	ARM64_FTR_END,
};
0244
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	/* FP/ASIMD are signed: 0xf means "not implemented". */
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_ELx_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

/* SVE sub-features; only exposed when the kernel supports SVE. */
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0),
	ARM64_FTR_END,
};

/* SME sub-features; FTR_EXACT because mismatches cannot be reconciled. */
static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
		       FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
	ARM64_FTR_END,
};
0317
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
	/*
	 * Page size not being supported at Stage-2 is not fatal. You
	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
	 * your favourite nesting hypervisor.
	 *
	 * There is a small corner case where the hypervisor explicitly
	 * advertises a given granule size at Stage-2 (value 2) on some
	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
	 * vCPUs. Although this is not forbidden by the architecture, it
	 * indicates that the hypervisor is being silly (or buggy).
	 *
	 * We make no effort to cope with this and pretend that if these
	 * fields are inconsistent across vCPUs, then it isn't worth
	 * trying to bring KVM up.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
	/*
	 * We already refuse to boot CPUs that don't support our configured
	 * page size, so we can only detect mismatches for a page size other
	 * than the one we're currently using. Unfortunately, SoCs like this
	 * exist in the wild so, even though we don't like it, we'll have to
	 * go along with it and treat them as non-strict.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory
	 * are mapped within the minimum PARange of all CPUs.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TIDCP1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
	/* SpecSEI: assume the worst (highest) value across CPUs is safe. */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};
0398
static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
	ARM64_FTR_END,
};

/* Shared "no override" placeholder for registers without one. */
static struct arm64_ftr_override __ro_after_init no_override = { };

/* CTR_EL0 is special: it is also needed by the errata framework. */
struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr,
	.override	= &no_override,
};
0423
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	/* InnerShr/OuterShr are signed; 0xf means "not implemented". */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};
0450
static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

/* GMID_EL1: MTE tag-granule block size. */
static const struct arm64_ftr_bits ftr_gmid[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};
0488
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
	/*
	 * SpecSEI = 1 indicates that the PE might generate an SError on an
	 * external abort on speculative read. It is safer to assume that an
	 * SError might be generated than that it will not be. Hence it is
	 * safe to use the same capability between all the PEs on the system.
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar6[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_END,
};
0535
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	/* [31:28] TraceFilt */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr1[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
	ARM64_FTR_END,
};

/* ZCR_EL1.LEN: system-wide safe SVE vector length is the minimum. */
static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0),	/* LEN */
	ARM64_FTR_END,
};

/* SMCR_EL1.LEN: system-wide safe SME vector length is the minimum. */
static const struct arm64_ftr_bits ftr_smcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0),	/* LEN */
	ARM64_FTR_END,
};
0592
0593
0594
0595
0596
0597
0598
0599 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
0600 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
0601 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
0602 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
0603 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
0604 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
0605 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
0606 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
0607 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
0608 ARM64_FTR_END,
0609 };
0610
0611
0612 static const struct arm64_ftr_bits ftr_single32[] = {
0613 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
0614 ARM64_FTR_END,
0615 };
0616
0617 static const struct arm64_ftr_bits ftr_raz[] = {
0618 ARM64_FTR_END,
0619 };
0620
/* Build a __ftr_reg_entry with an anonymous arm64_ftr_reg and override. */
#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) {	\
		.sys_id = id,					\
		.reg = 	&(struct arm64_ftr_reg){		\
			.name = id_str,				\
			.override = (ovr),			\
			.ftr_bits = &((table)[0]),		\
	}}

#define ARM64_FTR_REG_OVERRIDE(id, table, ovr)	\
	__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)

/* Register without a command-line override: points at no_override. */
#define ARM64_FTR_REG(id, table)		\
	__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)

/* Overrides filled in early (presumably from the command line by
 * idreg-override code — not visible in this chunk). */
struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64zfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64smfr0_override;
struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
0642
/*
 * Table of tracked feature registers. Entries MUST be kept sorted in
 * ascending sys_id order: get_arm64_ftr_reg_nowarn() binary-searches
 * this array, and sort_ftr_regs() BUG()s on violations at boot.
 */
static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg 	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
	ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
	ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
	ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0,
			       &id_aa64pfr0_override),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
			       &id_aa64pfr1_override),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
			       &id_aa64zfr0_override),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
			       &id_aa64smfr0_override),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
			       &id_aa64isar1_override),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
			       &id_aa64isar2_override),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
			       &id_aa64mmfr1_override),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
	ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),

	/* Op1 = 1, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};
0716
0717 static int search_cmp_ftr_reg(const void *id, const void *regp)
0718 {
0719 return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
0720 }
0721
0722
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732 static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
0733 {
0734 const struct __ftr_reg_entry *ret;
0735
0736 ret = bsearch((const void *)(unsigned long)sys_id,
0737 arm64_ftr_regs,
0738 ARRAY_SIZE(arm64_ftr_regs),
0739 sizeof(arm64_ftr_regs[0]),
0740 search_cmp_ftr_reg);
0741 if (ret)
0742 return ret->reg;
0743 return NULL;
0744 }
0745
0746
0747
0748
0749
0750
0751
0752
/*
 * get_arm64_ftr_reg - Looks up a feature register entry using
 * its sys_reg() encoding. With the WARN_ON for anything missing.
 *
 * returns - Upon a failure, it returns NULL. Otherwise a non-NULL entry.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	struct arm64_ftr_reg *reg;

	reg = get_arm64_ftr_reg_nowarn(sys_id);

	/*
	 * Requesting a non-existent register search is an error. Warn
	 * and let the caller handle it.
	 */
	WARN_ON(!reg);
	return reg;
}
0766
0767 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
0768 s64 ftr_val)
0769 {
0770 u64 mask = arm64_ftr_mask(ftrp);
0771
0772 reg &= ~mask;
0773 reg |= (ftr_val << ftrp->shift) & mask;
0774 return reg;
0775 }
0776
/*
 * Combine a newly observed field value with the current system-wide
 * value according to the field's sanitisation policy:
 *  - FTR_EXACT:               any mismatch forces the predefined safe value
 *  - FTR_LOWER_SAFE:          the lower of the two values is safe
 *  - FTR_HIGHER_SAFE:         the higher of the two values is safe
 *  - FTR_HIGHER_OR_ZERO_SAFE: as HIGHER_SAFE, except that a zero on
 *                             either side makes zero the safe value
 */
static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = min(new, cur);
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;	/* ret stays 0 */
		fallthrough;
	case FTR_HIGHER_SAFE:
		ret = max(new, cur);
		break;
	default:
		BUG();
	}

	return ret;
}
0802
/*
 * Sanity-check the arm64_ftr_regs[] table at boot:
 *  - every field must fit inside its 64-bit register,
 *  - fields within a register must be in descending shift order and
 *    must not overlap,
 *  - the table itself must be sorted by ascending sys_id so that the
 *    bsearch() in get_arm64_ftr_reg_nowarn() works.
 */
static void __init sort_ftr_regs(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
		const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
		unsigned int j = 0;

		/*
		 * Walk the field descriptors; the list is terminated by a
		 * zero-width entry.
		 */
		for (; ftr_bits->width != 0; ftr_bits++, j++) {
			unsigned int width = ftr_reg->ftr_bits[j].width;
			unsigned int shift = ftr_reg->ftr_bits[j].shift;
			unsigned int prev_shift;

			WARN((shift + width) > 64,
				"%s has invalid feature at shift %d\n",
				ftr_reg->name, shift);

			/*
			 * The first field has no predecessor to compare
			 * against for the order/overlap checks.
			 */
			if (j == 0)
				continue;

			/* Fields must descend: this field must end at or
			 * below where the previous one starts. */
			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
			WARN((shift + width) > prev_shift,
				"%s has feature overlap at shift %d\n",
				ftr_reg->name, shift);
		}

		/* The ordering check needs a previous table entry. */
		if (i == 0)
			continue;

		/* Strictly ascending sys_id is required for bsearch(). */
		BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
	}
}
0852
0853
0854
0855
0856
0857
0858
/*
 * Initialise the system-wide sanitised copy of @sys_reg from the boot
 * CPU's raw value @new, applying any recorded override for the
 * register. Also derives the register's strict-match, user-visibility
 * and valid-bits masks from its field descriptors.
 */
static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	if (!reg)
		return;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);
		s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);

		/* Override fully covers this field? */
		if ((ftr_mask & reg->override->mask) == ftr_mask) {
			s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
			char *str = NULL;

			if (ftr_ovr != tmp) {
				/* Override is not a safe value: drop it */
				reg->override->mask &= ~ftr_mask;
				reg->override->val &= ~ftr_mask;
				tmp = ftr_ovr;
				str = "ignoring override";
			} else if (ftr_new != tmp) {
				/* Safe override differs from HW: apply it */
				ftr_new = tmp;
				str = "forced";
			} else if (ftr_ovr == tmp) {
				/* Override equals the HW value already */
				str = "already set";
			}

			if (str)
				pr_warn("%s[%d:%d]: %s to %llx\n",
					reg->name,
					ftrp->shift + ftrp->width - 1,
					ftrp->shift, str, tmp);
		} else if ((ftr_mask & reg->override->val) == ftr_mask) {
			/* Value bits set without mask bits: malformed override */
			reg->override->val &= ~ftr_mask;
			pr_warn("%s[%d:%d]: impossible override, ignored\n",
				reg->name,
				ftrp->shift + ftrp->width - 1,
				ftrp->shift);
		}

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			/* Hidden fields expose their safe value to userspace */
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	/* Clear bits not covered by any field descriptor */
	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}
0928
0929 extern const struct arm64_cpu_capabilities arm64_errata[];
0930 static const struct arm64_cpu_capabilities arm64_features[];
0931
/*
 * Populate cpu_hwcaps_ptrs[] from a capability array (terminated by an
 * entry with a NULL ->matches), warning on out-of-range or duplicate
 * capability numbers.
 */
static void __init
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (WARN(caps->capability >= ARM64_NCAPS,
			"Invalid capability %d\n", caps->capability))
			continue;
		if (WARN(cpu_hwcaps_ptrs[caps->capability],
			"Duplicate entry for capability %d\n",
			caps->capability))
			continue;
		cpu_hwcaps_ptrs[caps->capability] = caps;
	}
}
0946
/* Build the capability-number -> descriptor lookup table. */
static void __init init_cpu_hwcaps_indirect_list(void)
{
	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
}
0952
0953 static void __init setup_boot_cpu_capabilities(void);
0954
/* Seed the sanitised copies of all AArch32 ID registers from @info. */
static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
{
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
0979
/*
 * Boot-CPU initialisation of the CPU feature framework: seed the
 * system-wide sanitised register state from the boot CPU, initialise
 * optional SVE/SME/MTE state, build the capability indirection table,
 * and finally detect the boot CPU capabilities.
 */
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure they are sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
	init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
		init_32bit_cpu_features(&info->aarch32);

	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
		info->reg_zcr = read_zcr_features();
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		vec_init_vq_map(ARM64_VEC_SVE);
	}

	if (IS_ENABLED(CONFIG_ARM64_SME) &&
	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
		info->reg_smcr = read_smcr_features();
		/*
		 * Mask out SMPS: priorities are not handled by the
		 * kernel, so the bit must not leak into the sanitised
		 * SMIDR view.
		 */
		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
		init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
		vec_init_vq_map(ARM64_VEC_SME);
	}

	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
		init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);

	/*
	 * Initialize the indirect array of CPU capabilities pointers
	 * before we handle the boot CPU below.
	 */
	init_cpu_hwcaps_indirect_list();

	/*
	 * Detect and enable early CPU capabilities based on the boot CPU,
	 * now that the feature infrastructure is initialised.
	 */
	setup_boot_cpu_capabilities();
}
1039
1040 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
1041 {
1042 const struct arm64_ftr_bits *ftrp;
1043
1044 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
1045 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
1046 s64 ftr_new = arm64_ftr_value(ftrp, new);
1047
1048 if (ftr_cur == ftr_new)
1049 continue;
1050
1051 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
1052 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
1053 }
1054
1055 }
1056
1057 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
1058 {
1059 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
1060
1061 if (!regp)
1062 return 0;
1063
1064 update_cpu_ftr_reg(regp, val);
1065 if ((boot & regp->strict_mask) == (val & regp->strict_mask))
1066 return 0;
1067 pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
1068 regp->name, boot, cpu, val);
1069 return 1;
1070 }
1071
/*
 * Drop the strict-match requirement for the field at bit position
 * @field of register @sys_id, allowing CPUs to differ in that field
 * without triggering a sanity-check taint.
 */
static void relax_cpu_ftr_reg(u32 sys_id, int field)
{
	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	if (!regp)
		return;

	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
		if (ftrp->shift == field) {
			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
			break;
		}
	}

	/*
	 * If the loop ran off the zero-width terminator, @field names no
	 * known field of this register.
	 */
	WARN_ON(!ftrp->width);
}
1090
/*
 * With allow_mismatched_32bit_el0, the boot CPU may lack 32-bit EL0
 * support. Seed the boot CPU's AArch32 register view from the first
 * onlined CPU that does support it; this is done at most once.
 */
static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
					 struct cpuinfo_arm64 *boot)
{
	static bool boot_cpu_32bit_regs_overridden = false;

	if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
		return;

	/* The boot CPU already provided valid 32-bit state. */
	if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
		return;

	boot->aarch32 = info->aarch32;
	init_32bit_cpu_features(&boot->aarch32);
	boot_cpu_32bit_regs_overridden = true;
}
1106
/*
 * Sanitise a secondary CPU's AArch32 ID registers against the boot
 * CPU's view. Returns non-zero if a strict field mismatched (caller
 * taints the kernel).
 */
static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
				     struct cpuinfo_32bit *boot)
{
	int taint = 0;
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/*
	 * Without AArch32 at EL1, relax the strictness of fields tied to
	 * EL1/secure/virtualization modes to avoid spurious mismatches.
	 */
	if (!id_aa64pfr0_32bit_el1(pfr0)) {
		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
	}

	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
				      info->reg_id_dfr1, boot->reg_id_dfr1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);
	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
				      info->reg_id_isar6, boot->reg_id_isar6);

	/* Memory-model, FP/SIMD and remaining PFR registers. */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
				      info->reg_id_pfr2, boot->reg_id_pfr2);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	return taint;
}
1177
1178
1179
1180
1181
1182
/*
 * Fold a newly-onlined CPU's ID registers into the system-wide
 * sanitised state and compare against the boot CPU. Any mismatch in a
 * "strict" field taints the kernel as out-of-spec.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/* Cache type register, consumed directly by userspace. */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/* DC ZVA block size register. */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* Generic timer frequency. */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/* AArch64 debug feature registers. */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);

	/* AArch64 instruction set attribute registers. */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
				      info->reg_id_aa64isar2, boot->reg_id_aa64isar2);

	/* AArch64 memory model feature registers. */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/* AArch64 processor feature registers. */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
				      info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);

	/* SVE: sanitise ZCR and, pre-finalisation, the VL map. */
	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
		info->reg_zcr = read_zcr_features();
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths only before capabilities are final */
		if (!system_capabilities_finalized())
			vec_update_vq_map(ARM64_VEC_SVE);
	}

	/* SME: sanitise SMCR and, pre-finalisation, the VL map. */
	if (IS_ENABLED(CONFIG_ARM64_SME) &&
	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
		info->reg_smcr = read_smcr_features();
		/*
		 * Mask out SMPS: streaming-mode priorities are not
		 * handled by the kernel.
		 */
		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
		taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
					info->reg_smcr, boot->reg_smcr);

		/* Probe vector lengths only before capabilities are final */
		if (!system_capabilities_finalized())
			vec_update_vq_map(ARM64_VEC_SME);
	}

	/* MTE: sanitise the tag-generation register when present. */
	if (IS_ENABLED(CONFIG_ARM64_MTE) &&
	    id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
		taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
					      info->reg_gmid, boot->reg_gmid);
	}

	/*
	 * AArch32 state is only sanitised when this CPU exposes 32-bit
	 * EL0; the lazy init handles a 64-bit-only boot CPU in the
	 * mismatched-32bit case.
	 */
	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		lazy_init_32bit_cpu_features(info, boot);
		taint |= update_32bit_cpu_features(cpu, &info->aarch32,
						   &boot->aarch32);
	}

	/* Any strict mismatch above marks the kernel out-of-spec. */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}
1315
1316 u64 read_sanitised_ftr_reg(u32 id)
1317 {
1318 struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
1319
1320 if (!regp)
1321 return 0;
1322 return regp->sys_val;
1323 }
1324 EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
1325
/* Expand to one switch case reading register @r into the local 'val'. */
#define read_sysreg_case(r)	\
	case r:		val = read_sysreg_s(r); break;

/*
 * Read the raw value of an ID register on the current CPU, identified
 * by its encoding, with any active override applied afterwards. Only
 * the registers enumerated below are supported; any other encoding is
 * a bug.
 */
u64 __read_sysreg_by_encoding(u32 sys_id)
{
	struct arm64_ftr_reg *regp;
	u64 val;

	switch (sys_id) {
	read_sysreg_case(SYS_ID_PFR0_EL1);
	read_sysreg_case(SYS_ID_PFR1_EL1);
	read_sysreg_case(SYS_ID_PFR2_EL1);
	read_sysreg_case(SYS_ID_DFR0_EL1);
	read_sysreg_case(SYS_ID_DFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR2_EL1);
	read_sysreg_case(SYS_ID_MMFR3_EL1);
	read_sysreg_case(SYS_ID_MMFR4_EL1);
	read_sysreg_case(SYS_ID_MMFR5_EL1);
	read_sysreg_case(SYS_ID_ISAR0_EL1);
	read_sysreg_case(SYS_ID_ISAR1_EL1);
	read_sysreg_case(SYS_ID_ISAR2_EL1);
	read_sysreg_case(SYS_ID_ISAR3_EL1);
	read_sysreg_case(SYS_ID_ISAR4_EL1);
	read_sysreg_case(SYS_ID_ISAR5_EL1);
	read_sysreg_case(SYS_ID_ISAR6_EL1);
	read_sysreg_case(SYS_MVFR0_EL1);
	read_sysreg_case(SYS_MVFR1_EL1);
	read_sysreg_case(SYS_MVFR2_EL1);

	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
	read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
	read_sysreg_case(SYS_ID_AA64SMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR2_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
	read_sysreg_case(SYS_DCZID_EL0);

	default:
		BUG();
		return 0;
	}

	/* Splice override bits into the raw hardware value, if any. */
	regp = get_arm64_ftr_reg(sys_id);
	if (regp) {
		val &= ~regp->override->mask;
		val |= (regp->override->val & regp->override->mask);
	}

	return val;
}
1391
1392 #include <linux/irqchip/arm-gic-v3.h>
1393
1394 static bool
1395 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
1396 {
1397 int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
1398 entry->field_width,
1399 entry->sign);
1400
1401 return val >= entry->min_field_value;
1402 }
1403
1404 static bool
1405 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
1406 {
1407 u64 val;
1408
1409 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1410 if (scope == SCOPE_SYSTEM)
1411 val = read_sanitised_ftr_reg(entry->sys_reg);
1412 else
1413 val = __read_sysreg_by_encoding(entry->sys_reg);
1414
1415 return feature_matches(val, entry);
1416 }
1417
/*
 * Return the set of CPUs that can run 32-bit EL0 tasks: none if the
 * system lacks support, only the capable subset when support is
 * mismatched across CPUs, otherwise all possible CPUs.
 */
const struct cpumask *system_32bit_el0_cpumask(void)
{
	if (!system_supports_32bit_el0())
		return cpu_none_mask;

	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_32bit_el0_mask;

	return cpu_possible_mask;
}
1428
/* "allow_mismatched_32bit_el0": permit 32-bit EL0 on a subset of CPUs. */
static int __init parse_32bit_el0_param(char *str)
{
	allow_mismatched_32bit_el0 = true;
	return 0;
}
early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
1435
/* sysfs: expose the 32-bit-EL0-capable CPU mask as a cpulist. */
static ssize_t aarch32_el0_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	const struct cpumask *mask = system_32bit_el0_cpumask();

	return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
}
static const DEVICE_ATTR_RO(aarch32_el0);
1444
/* Register the aarch32_el0 attribute only when mismatch mode was requested. */
static int __init aarch32_el0_sysfs_init(void)
{
	if (!allow_mismatched_32bit_el0)
		return 0;

	return device_create_file(cpu_subsys.dev_root, &dev_attr_aarch32_el0);
}
device_initcall(aarch32_el0_sysfs_init);
1453
/*
 * 32-bit EL0 capability check: a CPU lacking the feature still passes
 * when mismatched 32-bit EL0 was allowed on the command line.
 */
static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
{
	if (!has_cpuid_feature(entry, scope))
		return allow_mismatched_32bit_el0;

	/* Log once, when the system-wide capability is established. */
	if (scope == SCOPE_SYSTEM)
		pr_info("detected: 32-bit EL0 Support\n");

	return true;
}
1464
/*
 * The GICv3 CPU interface is only usable if its system-register access
 * (SRE) can actually be enabled; a higher exception level may have
 * locked it off even though the ID register advertises it.
 */
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}
1479
/* Match Cavium ThunderX pass 1.x and 2.x parts by MIDR range. */
static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
1489
/* FP/SIMD is absent when the sanitised AA64PFR0.FP field is negative. */
static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}
1497
1498 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
1499 int scope)
1500 {
1501 u64 ctr;
1502
1503 if (scope == SCOPE_SYSTEM)
1504 ctr = arm64_ftr_reg_ctrel0.sys_val;
1505 else
1506 ctr = read_cpuid_effective_cachetype();
1507
1508 return ctr & BIT(CTR_EL0_IDC_SHIFT);
1509 }
1510
/*
 * If this CPU's raw CTR_EL0 lacks IDC while the system reports it,
 * trap EL0 reads of CTR_EL0 (clear SCTLR_EL1.UCT) so the kernel can
 * present the effective value instead.
 */
static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
{
	if (!(read_cpuid_cachetype() & BIT(CTR_EL0_IDC_SHIFT)))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
1522
1523 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
1524 int scope)
1525 {
1526 u64 ctr;
1527
1528 if (scope == SCOPE_SYSTEM)
1529 ctr = arm64_ftr_reg_ctrel0.sys_val;
1530 else
1531 ctr = read_cpuid_cachetype();
1532
1533 return ctr & BIT(CTR_EL0_DIC_SHIFT);
1534 }
1535
/*
 * Common-Not-Private translations are unusable in a kdump kernel (the
 * crashed kernel may have left shared TLB entries behind) and on CPUs
 * with the NVIDIA Carmel CNP erratum.
 */
static bool __maybe_unused
has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
{
	if (is_kdump_kernel())
		return false;

	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
		return false;

	return has_cpuid_feature(entry, scope);
}
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
/*
 * Decide whether KASLR requires KPTI: not without CONFIG_RANDOMIZE_BASE;
 * not when E0PD can hide kernel mappings instead; not on CPUs subject
 * to Cavium erratum 27456. Otherwise KPTI is required iff a non-zero
 * KASLR offset was chosen.
 */
bool kaslr_requires_kpti(void)
{
	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return false;

	/* E0PD makes KPTI unnecessary for hiding the kernel from EL0. */
	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
		if (cpuid_feature_extract_unsigned_field(mmfr2,
						ID_AA64MMFR2_E0PD_SHIFT))
			return false;
	}

	/* KPTI cannot be used on CPUs with Cavium erratum 27456. */
	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		if (is_midr_in_range_list(read_cpuid_id(),
					  cavium_erratum_27456_cpus))
			return false;
	}

	return kaslr_offset() > 0;
}
1592
1593 static bool __meltdown_safe = true;
1594 static int __kpti_forced;
1595
/*
 * Decide whether this CPU requires KPTI (kernel unmapped at EL0).
 * Combines the Meltdown-safe allowlist, the CSV3 ID-register field,
 * erratum constraints, KASLR requirements, and the "kpti=" /
 * "mitigations=off" command-line controls. Also records whether any
 * CPU was Meltdown-unsafe in __meltdown_safe.
 */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	/* CPUs known not to be vulnerable to Meltdown. */
	static const struct midr_range kpti_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ }
	};
	char const *str = "kpti command line option";
	bool meltdown_safe;

	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);

	/* The ID register (CSV3 via entry) can also declare the CPU safe. */
	if (has_cpuid_feature(entry, scope))
		meltdown_safe = true;

	if (!meltdown_safe)
		__meltdown_safe = false;

	/*
	 * Cavium erratum 27456 is incompatible with KPTI: force it off
	 * on affected CPUs.
	 */
	if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
		str = "ARM64_WORKAROUND_CAVIUM_27456";
		__kpti_forced = -1;
	}

	/* KASLR may demand KPTI unless it was already forced off above. */
	if (kaslr_requires_kpti()) {
		if (!__kpti_forced) {
			str = "KASLR";
			__kpti_forced = 1;
		}
	}

	if (cpu_mitigations_off() && !__kpti_forced) {
		str = "mitigations=off";
		__kpti_forced = -1;
	}

	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
		return false;
	}

	/* A forced decision (either way) overrides the vulnerability state. */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by %s\n",
			     __kpti_forced > 0 ? "ON" : "OFF", str);
		return __kpti_forced > 0;
	}

	return !meltdown_safe;
}
1671
1672 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1673 #define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
1674
1675 extern
1676 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
1677 phys_addr_t size, pgprot_t prot,
1678 phys_addr_t (*pgtable_alloc)(int), int flags);
1679
1680 static phys_addr_t kpti_ng_temp_alloc;
1681
/* Hand out pages from the preallocated KPTI scratch region, top down. */
static phys_addr_t kpti_ng_pgd_alloc(int shift)
{
	kpti_ng_temp_alloc -= PAGE_SIZE;
	return kpti_ng_temp_alloc;
}
1687
/*
 * Repaint the kernel page tables with the nG (not-global) bit for KPTI.
 * Runs on every CPU; CPU 0 additionally builds a temporary page-table
 * scratch area used by the idmap'ed remapping routine. Must run with
 * the idmap installed since the live tables are being rewritten.
 */
static void __nocfi
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
	typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
	kpti_remap_fn *remap_fn;

	int cpu = smp_processor_id();
	int levels = CONFIG_PGTABLE_LEVELS;
	int order = order_base_2(levels);
	u64 kpti_ng_temp_pgd_pa = 0;
	pgd_t *kpti_ng_temp_pgd;
	u64 alloc = 0;

	/* Switch to the KPTI-capable exception vectors if still on default. */
	if (__this_cpu_read(this_cpu_vector) == vectors) {
		const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

		__this_cpu_write(this_cpu_vector, v);
	}

	/*
	 * Already using non-global mappings (e.g. decided at early boot):
	 * nothing to repaint.
	 */
	if (arm64_use_ng_mappings)
		return;

	remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));

	if (!cpu) {
		/*
		 * CPU 0 allocates 2^order pages: the top page becomes the
		 * temporary PGD, the remaining (levels - 1) pages feed
		 * kpti_ng_pgd_alloc() (which allocates downwards from the
		 * PGD's physical address).
		 */
		alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
		kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
		kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);

		/*
		 * Map one page of the scratch area at KPTI_NG_TEMP_VA in
		 * the temporary tables so the remap routine can use it.
		 */
		create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
					KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
					kpti_ng_pgd_alloc, 0);
	}

	/* The remap routine runs via the idmap with the MMU reconfigured. */
	cpu_install_idmap();
	remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
	cpu_uninstall_idmap();

	if (!cpu) {
		free_pages(alloc, order);
		arm64_use_ng_mappings = true;
	}
}
1753 #else
/* !CONFIG_UNMAP_KERNEL_AT_EL0: KPTI compiled out, nothing to do. */
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
}
1758 #endif
1759
1760 static int __init parse_kpti(char *str)
1761 {
1762 bool enabled;
1763 int ret = strtobool(str, &enabled);
1764
1765 if (ret)
1766 return ret;
1767
1768 __kpti_forced = enabled ? 1 : -1;
1769 return 0;
1770 }
1771 early_param("kpti", parse_kpti);
1772
1773 #ifdef CONFIG_ARM64_HW_AFDBM
/*
 * Turn on hardware dirty-bit management (TCR_EL1.HD) on this CPU; the
 * local TLB flush discards entries created before the change.
 */
static inline void __cpu_enable_hw_dbm(void)
{
	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;

	write_sysreg(tcr, tcr_el1);
	isb();
	local_flush_tlb_all();
}
1782
/* Is this CPU on the list of parts with broken DBM (known errata)? */
static bool cpu_has_broken_dbm(void)
{
	/* List of CPUs which have broken DBM support. */
	static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		/* Kryo 4xx Silver revisions 0xd..0xe are also affected */
		MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2051678
		MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
#endif
		{},
	};

	return is_midr_in_range_list(read_cpuid_id(), cpus);
}
1800
1801 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1802 {
1803 return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1804 !cpu_has_broken_dbm();
1805 }
1806
/* Per-CPU enable hook: switch on HW DBM where this CPU can use it. */
static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
{
	if (cpu_can_use_dbm(cap))
		__cpu_enable_hw_dbm();
}
1812
/*
 * DBM is handled per CPU: capable CPUs enable it locally via the
 * enable hook, incapable CPUs simply run without it. The capability is
 * therefore always reported as present system-wide; detection is only
 * logged once, on the first CPU that can use it.
 */
static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
		       int __unused)
{
	static bool detected = false;

	if (!detected && cpu_can_use_dbm(cap)) {
		detected = true;
		pr_info("detected: Hardware dirty bit management\n");
	}

	return true;
}
1841
1842 #endif
1843
1844 #ifdef CONFIG_ARM64_AMU_EXTN
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855 static struct cpumask amu_cpus __read_mostly;
1856
/* Does @cpu implement the Activity Monitors Unit extension? */
bool cpu_has_amu_feat(int cpu)
{
	return cpumask_test_cpu(cpu, &amu_cpus);
}
1861
/* Return some CPU with AMU; >= nr_cpu_ids when the mask is empty. */
int get_cpu_with_amu_feat(void)
{
	return cpumask_any(&amu_cpus);
}
1866
/*
 * Per-CPU AMU enable hook: record the CPU in amu_cpus and refresh the
 * frequency-counter references, unless erratum 2457168 applies to this
 * CPU.
 */
static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
{
	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
		pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
			smp_processor_id());
		cpumask_set_cpu(smp_processor_id(), &amu_cpus);

		/* Skip counter refs on CPUs with broken AMU counters */
		if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
			update_freq_counters_refs();
	}
}
1879
/*
 * AMU support is tracked per CPU in amu_cpus by the enable hook; the
 * system-wide capability is always reported so the hook runs on every
 * CPU regardless of mismatch.
 */
static bool has_amu(const struct arm64_cpu_capabilities *cap,
		    int __unused)
{
	return true;
}
1898 #else
/* !CONFIG_ARM64_AMU_EXTN: no CPU has AMU. */
int get_cpu_with_amu_feat(void)
{
	return nr_cpu_ids;
}
1903 #endif
1904
/* True when the kernel is running at EL2 (VHE). */
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}
1909
/*
 * On VHE, mirror TPIDR_EL1 into TPIDR_EL2 for CPUs brought up before
 * the alternatives switched the kernel to using TPIDR_EL2; once the
 * alternative is applied this copy is no longer needed.
 */
static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{
	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
1923
1924 #ifdef CONFIG_ARM64_PAN
/*
 * Enable PAN on this CPU: clear SCTLR_EL1.SPAN so PSTATE.PAN is set on
 * exception entry, and set PAN for the current context.
 */
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
	/* We modify PSTATE: must not be called from interrupt context. */
	WARN_ON_ONCE(in_interrupt());

	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
	set_pstate_pan(1);
}
1936 #endif
1937
1938 #ifdef CONFIG_ARM64_RAS_EXTN
/* RAS: clear DISR_EL1 so we start with no stale deferred error state. */
static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{
	write_sysreg_s(0, SYS_DISR_EL1);
}
1944 #endif
1945
1946 #ifdef CONFIG_ARM64_PTR_AUTH
/*
 * Address-authentication capability check, per-CPU scopes only. The
 * boot CPU checks its (sanitised) field against the minimum; secondary
 * CPUs must both meet the minimum and match the boot CPU's field value
 * exactly, since mixed PAC algorithms cannot interoperate.
 */
static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
{
	int boot_val, sec_val;

	/* System-wide scope is handled via the meta-capability. */
	WARN_ON(scope == SCOPE_SYSTEM);

	boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
					       entry->field_pos, entry->sign);
	if (scope & SCOPE_BOOT_CPU)
		return boot_val >= entry->min_field_value;

	/* Secondary CPU: compare this CPU's raw field against the boot CPU */
	sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
					      entry->field_pos, entry->sign);
	return (sec_val >= entry->min_field_value) && (sec_val == boot_val);
}
1972
/* Address auth is present if any of the three algorithm variants matches. */
static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
				     int scope)
{
	bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
	bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
	bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);

	return apa || apa3 || api;
}
1982
/* Generic auth is present if any of the three algorithm variants matches. */
static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
			     int __unused)
{
	bool gpi = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
	bool gpa = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5);
	bool gpa3 = __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3);

	return gpa || gpa3 || gpi;
}
1992 #endif
1993
1994 #ifdef CONFIG_ARM64_E0PD
static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
{
	/*
	 * Only set TCR_EL1.E0PD1 on CPUs that actually implement E0PD:
	 * the capability may have been detected system-wide while an
	 * individual CPU lacks it.
	 */
	if (this_cpu_has_cap(ARM64_HAS_E0PD))
		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
}
2000 #endif
2001
2002 #ifdef CONFIG_ARM64_PSEUDO_NMI
2003 static bool enable_pseudo_nmi;
2004
2005 static int __init early_enable_pseudo_nmi(char *p)
2006 {
2007 return strtobool(p, &enable_pseudo_nmi);
2008 }
2009 early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
2010
2011 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
2012 int scope)
2013 {
2014 return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
2015 }
2016 #endif
2017
2018 #ifdef CONFIG_ARM64_BTI
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * Use of X16/X17 for tail-calls and trampolines that jump to
	 * function entry points using BR is a requirement for
	 * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
	 * So, be strict and forbid other BRs using other registers to
	 * jump onto a PACIASP instruction:
	 */
	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
	isb();
}
2031 #endif
2032
2033 #ifdef CONFIG_ARM64_MTE
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
	/* Enable allocation-tag access for EL1 and EL0 before any tag ops. */
	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
	isb();

	/*
	 * Clear the tags in the zero page. This needs to be done via the
	 * linear map which has the Tagged attribute. The test_and_set_bit()
	 * ensures only the first CPU to get here does the clearing.
	 */
	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
		mte_clear_page_tags(lm_alias(empty_zero_page));

	kasan_init_hw_tags_cpu();
}
2048 #endif
2049
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
	/* Hide the AES instructions from 32-bit userspace on affected parts. */
	if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
		compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif
}
2057
2058 #ifdef CONFIG_KVM
/* True when KVM was requested to run in protected (pKVM) mode. */
static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return kvm_get_mode() == KVM_MODE_PROTECTED;
}
2063 #endif
2064
static void cpu_trap_el0_impdef(const struct arm64_cpu_capabilities *__unused)
{
	/* Trap EL0 accesses to IMPLEMENTATION DEFINED registers (SCTLR_EL1.TIDCP). */
	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_TIDCP);
}
2069
2070
2071 static bool
2072 cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
2073 {
2074 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
2075 }
2076
2077 static bool
2078 cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
2079 {
2080 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
2081 }
2082
2083 static bool
2084 cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
2085 {
2086 return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
2087 }
2088
/*
 * Master table of architectural CPU features. Each entry describes how a
 * capability is detected (typically a minimum value of a sanitised ID
 * register field via has_cpuid_feature(), or a custom .matches callback),
 * its conflict policy (.type) and an optional per-CPU .cpu_enable hook.
 */
static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
	{
		.desc = "Enhanced Counter Virtualization",
		.capability = ARM64_HAS_ECV,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR0_EL1,
		.field_pos = ID_AA64MMFR0_ECV_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_EPAN
	{
		.desc = "Enhanced Privileged Access Never",
		.capability = ARM64_HAS_EPAN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 3,
	},
#endif /* CONFIG_ARM64_EPAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.matches = has_no_hw_prefetch,
	},
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = runs_at_el2,
		.cpu_enable = cpu_copy_el2regs,
	},
	{
		/* Not exposed directly: use system_supports_32bit_el0() instead. */
		.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_32bit_el0,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
	},
#ifdef CONFIG_KVM
	{
		.desc = "32-bit EL1 Support",
		.capability = ARM64_HAS_32BIT_EL1,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL1_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR0_ELx_32BIT_64BIT,
	},
	{
		.desc = "Protected KVM",
		.capability = ARM64_KVM_PROTECTED_MODE,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = is_kvm_protected_mode,
	},
#endif
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
		/*
		 * The ID feature fields below are used to indicate that
		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
		 * more details.
		 */
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
		.matches = unmap_kernel_at_el0,
		.cpu_enable = kpti_install_ng_mappings,
	},
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
#ifdef CONFIG_ARM64_PMEM
	{
		.desc = "Data cache clean to Point of Persistence",
		.capability = ARM64_HAS_DCPOP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
	},
	{
		.desc = "Data cache clean to Point of Deep Persistence",
		.capability = ARM64_HAS_DCPODP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
		.field_width = 4,
		.min_field_value = 2,
	},
#endif
#ifdef CONFIG_ARM64_SVE
	{
		.desc = "Scalable Vector Extension",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_SVE,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_SVE_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR0_SVE,
		.matches = has_cpuid_feature,
		.cpu_enable = sve_kernel_enable,
	},
#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_RAS_EXTN
	{
		.desc = "RAS Extension Support",
		.capability = ARM64_HAS_RAS_EXTN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_RAS_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR0_RAS_V1,
		.cpu_enable = cpu_clear_disr,
	},
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_AMU_EXTN
	{
		/*
		 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
		 * Therefore, don't provide .desc as we don't want the detection
		 * message to be shown until at least one CPU is detected to
		 * support the feature.
		 */
		.capability = ARM64_HAS_AMU_EXTN,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.matches = has_amu,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_AMU_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR0_AMU,
		.cpu_enable = cpu_amu_enable,
	},
#endif /* CONFIG_ARM64_AMU_EXTN */
	{
		.desc = "Data cache clean to the PoU not required for I/D coherence",
		.capability = ARM64_HAS_CACHE_IDC,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cache_idc,
		.cpu_enable = cpu_emulate_effective_ctr,
	},
	{
		.desc = "Instruction cache invalidation not required for I/D coherence",
		.capability = ARM64_HAS_CACHE_DIC,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cache_dic,
	},
	{
		.desc = "Stage-2 Force Write-Back",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_HAS_STAGE2_FWB,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_FWB_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
		.matches = has_cpuid_feature,
	},
	{
		.desc = "ARMv8.4 Translation Table Level",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_HAS_ARMv8_4_TTL,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_TTL_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
		.matches = has_cpuid_feature,
	},
	{
		.desc = "TLB range maintenance instructions",
		.capability = ARM64_HAS_TLB_RANGE,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
	},
#ifdef CONFIG_ARM64_HW_AFDBM
	{
		/*
		 * Since we turn this on always, we don't want the user to
		 * think that the feature is enabled when it may not be.
		 * So hide the description.
		 *
		 * NOTE: this is a WEAK_LOCAL_CPU_FEATURE: individual CPUs
		 * may or may not implement DBM; the .matches / .cpu_enable
		 * callbacks handle the per-CPU state.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.capability = ARM64_HW_DBM,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
		.field_width = 4,
		.min_field_value = 2,
		.matches = has_hw_dbm,
		.cpu_enable = cpu_enable_hw_dbm,
	},
#endif
	{
		.desc = "CRC32 instructions",
		.capability = ARM64_HAS_CRC32,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
	},
	{
		.desc = "Speculative Store Bypassing Safe (SSBS)",
		.capability = ARM64_SSBS,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_SSBS_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
	},
#ifdef CONFIG_ARM64_CNP
	{
		.desc = "Common not Private translations",
		.capability = ARM64_HAS_CNP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_useable_cnp,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR2_CNP_SHIFT,
		.field_width = 4,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_cnp,
	},
#endif
	{
		.desc = "Speculation barrier (SB)",
		.capability = ARM64_HAS_SB,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_EL1_SB_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PTR_AUTH
	{
		.desc = "Address authentication (architected QARMA5 algorithm)",
		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_APA_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR1_EL1_APA_PAuth,
		.matches = has_address_auth_cpucap,
	},
	{
		.desc = "Address authentication (architected QARMA3 algorithm)",
		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth,
		.matches = has_address_auth_cpucap,
	},
	{
		.desc = "Address authentication (IMP DEF algorithm)",
		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_API_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR1_EL1_API_PAuth,
		.matches = has_address_auth_cpucap,
	},
	{
		/* Meta-capability: true when any address-auth variant matches. */
		.capability = ARM64_HAS_ADDRESS_AUTH,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.matches = has_address_auth_metacap,
	},
	{
		.desc = "Generic authentication (architected QARMA5 algorithm)",
		.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_GPA_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR1_EL1_GPA_IMP,
		.matches = has_cpuid_feature,
	},
	{
		.desc = "Generic authentication (architected QARMA3 algorithm)",
		.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP,
		.matches = has_cpuid_feature,
	},
	{
		.desc = "Generic authentication (IMP DEF algorithm)",
		.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_GPI_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64ISAR1_EL1_GPI_IMP,
		.matches = has_cpuid_feature,
	},
	{
		/* Meta-capability: true when any generic-auth variant matches. */
		.capability = ARM64_HAS_GENERIC_AUTH,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_generic_auth,
	},
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_PSEUDO_NMI
	{
		/*
		 * Depends on having GICv3
		 */
		.desc = "IRQ priority masking",
		.capability = ARM64_HAS_IRQ_PRIO_MASKING,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = can_use_gic_priorities,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#endif
#ifdef CONFIG_ARM64_E0PD
	{
		.desc = "E0PD",
		.capability = ARM64_HAS_E0PD,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_width = 4,
		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
		.matches = has_cpuid_feature,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_e0pd,
	},
#endif
	{
		.desc = "Random Number Generator",
		.capability = ARM64_HAS_RNG,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
		.field_width = 4,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_BTI
	{
		.desc = "Branch Target Identification",
		.capability = ARM64_BTI,
#ifdef CONFIG_ARM64_BTI_KERNEL
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
#else
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
#endif
		.matches = has_cpuid_feature,
		.cpu_enable = bti_enable,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_BT_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR1_BT_BTI,
		.sign = FTR_UNSIGNED,
	},
#endif
#ifdef CONFIG_ARM64_MTE
	{
		.desc = "Memory Tagging Extension",
		.capability = ARM64_MTE,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_MTE_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR1_MTE,
		.sign = FTR_UNSIGNED,
		.cpu_enable = cpu_enable_mte,
	},
	{
		.desc = "Asymmetric MTE Tag Check Fault",
		.capability = ARM64_MTE_ASYMM,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.field_pos = ID_AA64PFR1_MTE_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR1_MTE_ASYMM,
		.sign = FTR_UNSIGNED,
	},
#endif /* CONFIG_ARM64_MTE */
	{
		.desc = "RCpc load-acquire (LDAPR)",
		.capability = ARM64_HAS_LDAPR,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_EL1_LRCPC_SHIFT,
		.field_width = 4,
		.matches = has_cpuid_feature,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_SME
	{
		.desc = "Scalable Matrix Extension",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_SME,
		.sys_reg = SYS_ID_AA64PFR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR1_SME_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64PFR1_SME,
		.matches = has_cpuid_feature,
		.cpu_enable = sme_kernel_enable,
	},
	/* FA64 should be sorted after the base SME capability */
	{
		.desc = "FA64",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_SME_FA64,
		.sys_reg = SYS_ID_AA64SMFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64SMFR0_EL1_FA64_SHIFT,
		.field_width = 1,
		.min_field_value = ID_AA64SMFR0_EL1_FA64_IMP,
		.matches = has_cpuid_feature,
		.cpu_enable = fa64_kernel_enable,
	},
#endif /* CONFIG_ARM64_SME */
	{
		.desc = "WFx with timeout",
		.capability = ARM64_HAS_WFXT,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64ISAR2_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT,
		.field_width = 4,
		.matches = has_cpuid_feature,
		.min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP,
	},
	{
		.desc = "Trap EL0 IMPLEMENTATION DEFINED functionality",
		.capability = ARM64_HAS_TIDCP1,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64MMFR1_TIDCP1_SHIFT,
		.field_width = 4,
		.min_field_value = ID_AA64MMFR1_TIDCP1_IMP,
		.matches = has_cpuid_feature,
		.cpu_enable = cpu_trap_el0_impdef,
	},
	{},	/* sentinel */
};
2625
/* Fill in the ID-register matching fields of an arm64_cpu_capabilities entry. */
#define HWCAP_CPUID_MATCH(reg, field, width, s, min_value)		\
		.matches = has_cpuid_feature,				\
		.sys_reg = reg,						\
		.field_pos = field,					\
		.field_width = width,					\
		.sign = s,						\
		.min_field_value = min_value,

/* Common boilerplate for an ELF hwcap entry (name, hwcap class and bit). */
#define __HWCAP_CAP(name, cap_type, cap)				\
		.desc = name,						\
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,			\
		.hwcap_type = cap_type,					\
		.hwcap = cap,						\

/* Hwcap gated on a single ID-register field reaching min_value. */
#define HWCAP_CAP(reg, field, width, s, min_value, cap_type, cap)	\
	{								\
		__HWCAP_CAP(#cap, cap_type, cap)			\
		HWCAP_CPUID_MATCH(reg, field, width, s, min_value)	\
	}

/* Hwcap gated on ANY entry of a match list (e.g. several PAC algorithms). */
#define HWCAP_MULTI_CAP(list, cap_type, cap)				\
	{								\
		__HWCAP_CAP(#cap, cap_type, cap)			\
		.matches = cpucap_multi_entry_cap_matches,		\
		.match_list = list,					\
	}

/* Hwcap gated on a custom match callback. */
#define HWCAP_CAP_MATCH(match, cap_type, cap)				\
	{								\
		__HWCAP_CAP(#cap, cap_type, cap)			\
		.matches = match,					\
	}
2658
2659 #ifdef CONFIG_ARM64_PTR_AUTH
/* Address authentication: any one of QARMA5, QARMA3 or IMP DEF suffices. */
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_APA_SHIFT,
				  4, FTR_UNSIGNED,
				  ID_AA64ISAR1_EL1_APA_PAuth)
	},
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT,
				  4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_APA3_PAuth)
	},
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT,
				  4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_API_PAuth)
	},
	{},	/* sentinel */
};
2676
/* Generic (PACGA) authentication: any one of QARMA5, QARMA3 or IMP DEF. */
static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPA_SHIFT,
				  4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP)
	},
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT,
				  4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP)
	},
	{
		HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT,
				  4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPI_IMP)
	},
	{},	/* sentinel */
};
2692 #endif
2693
/*
 * ELF hwcaps advertised to 64-bit userspace via AT_HWCAP/AT_HWCAP2.
 * Each entry maps a sanitised ID register field to a KERNEL_HWCAP_* bit.
 */
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_DP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_TS_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
	/* FP/ASIMD fields are signed: 0 = present, 0xf (-1) = absent. */
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_SB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_EBF16),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SVEver_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_AES_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_AES_PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BitPerm_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_BF16_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SHA3_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_SM4_IMP, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_I8MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F32MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, FTR_UNSIGNED, ID_AA64ZFR0_EL1_F64MM_IMP, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
#endif
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_BTI
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
	HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
#ifdef CONFIG_ARM64_MTE
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
#endif /* CONFIG_ARM64_MTE */
	HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
	HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
	HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
	HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I16I64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F64F64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_EL1_I8I32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_B16F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
	HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_F32F32_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
#endif /* CONFIG_ARM64_SME */
	{},	/* sentinel */
};
2770
2771 #ifdef CONFIG_COMPAT
static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
{
	/*
	 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
	 * in line with the 32-bit kernel's vfp_init() check. Requiring the
	 * fields to be non-zero keeps the check future proof against new
	 * field values.
	 */
	u32 mvfr1;

	/* Direct sysreg reads are only valid with preemption disabled. */
	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
	else
		mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
}
2791 #endif
2792
/* Hwcaps advertised to 32-bit (compat) userspace, on top of the defaults. */
static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
	HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
	/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 4, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 4, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},	/* sentinel */
};
2808
/* Set the ELF hwcap bit described by @cap in the matching hwcap word. */
static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		cpu_set_feature(cap->hwcap);
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		/* Table entry with an unknown hwcap class: programming error. */
		WARN_ON(1);
		break;
	}
}
2828
2829
2830 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2831 {
2832 bool rc;
2833
2834 switch (cap->hwcap_type) {
2835 case CAP_HWCAP:
2836 rc = cpu_have_feature(cap->hwcap);
2837 break;
2838 #ifdef CONFIG_COMPAT
2839 case CAP_COMPAT_HWCAP:
2840 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
2841 break;
2842 case CAP_COMPAT_HWCAP2:
2843 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
2844 break;
2845 #endif
2846 default:
2847 WARN_ON(1);
2848 rc = false;
2849 }
2850
2851 return rc;
2852 }
2853
/* Walk a (sentinel-terminated) hwcap table and advertise matching caps. */
static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	/* We support emulation of accesses to CPU ID feature registers */
	cpu_set_named_feature(CPUID);
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
			cap_set_elf_hwcap(hwcaps);
}
2862
/*
 * Detect and record all capabilities whose scope is within @scope_mask
 * on the current CPU. Already-detected capabilities are skipped.
 */
static void update_cpu_capabilities(u16 scope_mask)
{
	int i;
	const struct arm64_cpu_capabilities *caps;

	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	for (i = 0; i < ARM64_NCAPS; i++) {
		caps = cpu_hwcaps_ptrs[i];
		if (!caps || !(caps->type & scope_mask) ||
		    cpus_have_cap(caps->capability) ||
		    !caps->matches(caps, cpucap_default_scope(caps)))
			continue;

		if (caps->desc)
			pr_info("detected: %s\n", caps->desc);
		cpus_set_cap(caps->capability);

		/* Remember boot-scope caps so alternatives can be applied early. */
		if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
			set_bit(caps->capability, boot_capabilities);
	}
}
2884
2885
2886
2887
2888
/*
 * Run the cpu_enable() callback for every detected capability whose
 * scope is not SCOPE_BOOT_CPU. Invoked on each online CPU via
 * stop_machine() (see enable_cpu_capabilities()); returns 0 as required
 * by the stop_machine() callback protocol.
 */
static int cpu_enable_non_boot_scope_capabilities(void *__unused)
{
	int i;
	u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;

	for_each_available_cap(i) {
		const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];

		/* Every available cap should have a registered descriptor. */
		if (WARN_ON(!cap))
			continue;

		if (!(cap->type & non_boot_scope))
			continue;

		if (cap->cpu_enable)
			cap->cpu_enable(cap);
	}
	return 0;
}
2908
2909
2910
2911
2912
/*
 * Enable all detected capabilities within @scope_mask: flip each
 * capability's static key, then run the cpu_enable() callbacks.
 * SCOPE_BOOT_CPU callbacks run directly on the calling (boot) CPU;
 * for any other scope they run on every online CPU via stop_machine().
 */
static void __init enable_cpu_capabilities(u16 scope_mask)
{
	int i;
	const struct arm64_cpu_capabilities *caps;
	bool boot_scope;

	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
	boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);

	for (i = 0; i < ARM64_NCAPS; i++) {
		unsigned int num;

		caps = cpu_hwcaps_ptrs[i];
		if (!caps || !(caps->type & scope_mask))
			continue;
		num = caps->capability;
		if (!cpus_have_cap(num))
			continue;

		/* Make the capability visible via the static-key fast path. */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (boot_scope && caps->cpu_enable)
			/*
			 * Boot-CPU-scope capabilities are enabled right here
			 * on the calling CPU; all other scopes are handled by
			 * the stop_machine() pass below so the callback runs
			 * on every online CPU.
			 */
			caps->cpu_enable(caps);
	}

	/*
	 * For non-boot scopes, run the cpu_enable() callbacks on all online
	 * CPUs inside stop_machine() so they execute with the machine
	 * quiesced on every CPU.
	 */
	if (!boot_scope)
		stop_machine(cpu_enable_non_boot_scope_capabilities,
			     NULL, cpu_online_mask);
}
2958
2959
2960
2961
2962
2963
/*
 * Verify the calling CPU against the finalised system state for every
 * capability in @scope_mask:
 *
 *  - system has the cap, CPU lacks it: only tolerated when the cap is
 *    marked optional for late CPUs;
 *  - CPU has the cap, system does not: only tolerated when the cap
 *    permits late-CPU mismatch.
 *
 * On a conflict the CPU is panicked or parked early, depending on the
 * capability's conflict policy.
 */
static void verify_local_cpu_caps(u16 scope_mask)
{
	int i;
	bool cpu_has_cap, system_has_cap;
	const struct arm64_cpu_capabilities *caps;

	scope_mask &= ARM64_CPUCAP_SCOPE_MASK;

	for (i = 0; i < ARM64_NCAPS; i++) {
		caps = cpu_hwcaps_ptrs[i];
		if (!caps || !(caps->type & scope_mask))
			continue;

		cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
		system_has_cap = cpus_have_cap(caps->capability);

		if (system_has_cap) {
			/*
			 * The system advertises this capability; a CPU that
			 * lacks it is only acceptable if the capability is
			 * optional for late CPUs.
			 */
			if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
				break;

			/*
			 * The capability is enabled system-wide, so invoke
			 * cpu_enable() here regardless of whether this CPU
			 * matches — the callback takes whatever action is
			 * appropriate for this CPU.
			 */
			if (caps->cpu_enable)
				caps->cpu_enable(caps);
		} else {
			/*
			 * This CPU has a capability the system did not
			 * detect; only acceptable if the capability permits
			 * such a mismatch on late CPUs.
			 */
			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
				break;
		}
	}

	/* A break above means a conflict was found for capability i. */
	if (i < ARM64_NCAPS) {
		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
			smp_processor_id(), caps->capability,
			caps->desc, system_has_cap, cpu_has_cap);

		if (cpucap_panic_on_conflict(caps))
			cpu_panic_kernel();
		else
			cpu_die_early();
	}
}
3016
3017
3018
3019
3020
/*
 * Early sanity checks for a booting CPU: ASID width, then the
 * SCOPE_BOOT_CPU capabilities that were finalised on the boot CPU.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_asid_bits();

	verify_local_cpu_caps(SCOPE_BOOT_CPU);
}
3027
3028 static void
3029 __verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
3030 {
3031
3032 for (; caps->matches; caps++)
3033 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
3034 pr_crit("CPU%d: missing HWCAP: %s\n",
3035 smp_processor_id(), caps->desc);
3036 cpu_die_early();
3037 }
3038 }
3039
/*
 * Verify this CPU supports all advertised hwcaps: always the native
 * table, plus the compat table when the CPU supports 32-bit EL0.
 */
static void verify_local_elf_hwcaps(void)
{
	__verify_local_elf_hwcaps(arm64_elf_hwcaps);

	if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
		__verify_local_elf_hwcaps(compat_elf_hwcaps);
}
3047
/*
 * Park this CPU if its SVE vector length support does not cover what
 * the system has already committed to (maximum VL and per-VQ map).
 */
static void verify_sve_features(void)
{
	u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	u64 zcr = read_zcr_features();

	unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
	unsigned int len = zcr & ZCR_ELx_LEN_MASK;

	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
		pr_crit("CPU%d: SVE: vector length support mismatch\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other ZCR bits here if necessary */
}
3064
/*
 * Park this CPU if its SME vector length support does not cover what
 * the system has already committed to (maximum VL and per-VQ map).
 */
static void verify_sme_features(void)
{
	u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
	u64 smcr = read_smcr_features();

	unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
	unsigned int len = smcr & SMCR_ELx_LEN_MASK;

	if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
		pr_crit("CPU%d: SME: vector length support mismatch\n",
			smp_processor_id());
		cpu_die_early();
	}

	/* Add checks on other SMCR bits here if necessary */
}
3081
/*
 * Verify this CPU can support KVM as configured: its VMID width and IPA
 * range must be at least what the system has committed to. No-op when
 * KVM is not built in.
 */
static void verify_hyp_capabilities(void)
{
	u64 safe_mmfr1, mmfr0, mmfr1;
	int parange, ipa_max;
	unsigned int safe_vmid_bits, vmid_bits;

	if (!IS_ENABLED(CONFIG_KVM))
		return;

	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);

	/* Verify VMID bits */
	safe_vmid_bits = get_vmid_bits(safe_mmfr1);
	vmid_bits = get_vmid_bits(mmfr1);
	if (vmid_bits < safe_vmid_bits) {
		pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
		cpu_die_early();
	}

	/* Verify IPA range */
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
	if (ipa_max < get_kvm_ipa_limit()) {
		pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
		cpu_die_early();
	}
}
3112
3113
3114
3115
3116
3117
3118
3119
3120
/*
 * Run the full verification suite on a CPU brought up after the system
 * capabilities were finalised. SCOPE_BOOT_CPU caps are excluded here
 * because they were already checked in check_early_cpu_features().
 */
static void verify_local_cpu_capabilities(void)
{
	/*
	 * Verify all non-boot-scope capabilities, then the ELF hwcaps and
	 * the optional SVE/SME/hypervisor state.
	 */
	verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
	verify_local_elf_hwcaps();

	if (system_supports_sve())
		verify_sve_features();

	if (system_supports_sme())
		verify_sme_features();

	if (is_hyp_mode_available())
		verify_hyp_capabilities();
}
3140
/*
 * Per-CPU capability hook, called on every CPU that comes up: run the
 * early (boot-scope) checks, then either contribute to system-wide
 * detection or verify against the finalised state.
 */
void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs must at least satisfy the capabilities that
	 * were finalised on the boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * Before the system capabilities are finalised, this CPU still
	 * contributes to SCOPE_LOCAL_CPU detection. Afterwards, it must
	 * instead be verified against the finalised system state.
	 */
	if (!system_capabilities_finalized())
		update_cpu_capabilities(SCOPE_LOCAL_CPU);
	else
		verify_local_cpu_capabilities();
}
3160
static void __init setup_boot_cpu_capabilities(void)
{
	/* Detect boot-CPU and local-CPU scope capabilities on the boot CPU. */
	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
	/* Enable only the SCOPE_BOOT_CPU capabilities right away. */
	enable_cpu_capabilities(SCOPE_BOOT_CPU);
}
3168
3169 bool this_cpu_has_cap(unsigned int n)
3170 {
3171 if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
3172 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
3173
3174 if (cap)
3175 return cap->matches(cap, SCOPE_LOCAL_CPU);
3176 }
3177
3178 return false;
3179 }
3180 EXPORT_SYMBOL_GPL(this_cpu_has_cap);
3181
3182
3183
3184
3185
3186
3187
3188 static bool __maybe_unused __system_matches_cap(unsigned int n)
3189 {
3190 if (n < ARM64_NCAPS) {
3191 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
3192
3193 if (cap)
3194 return cap->matches(cap, SCOPE_SYSTEM);
3195 }
3196 return false;
3197 }
3198
/* Set hwcap bit @num in the native elf_hwcap bitmap. */
void cpu_set_feature(unsigned int num)
{
	set_bit(num, elf_hwcap);
}
3203
/* Test hwcap bit @num in the native elf_hwcap bitmap. */
bool cpu_have_feature(unsigned int num)
{
	return test_bit(num, elf_hwcap);
}
EXPORT_SYMBOL_GPL(cpu_have_feature);
3209
unsigned long cpu_get_elf_hwcap(void)
{
	/*
	 * Only the first word of the elf_hwcap bitmap is exposed through
	 * AT_HWCAP; the second word goes out via cpu_get_elf_hwcap2().
	 */
	return elf_hwcap[0];
}
3219
/* Second word of the elf_hwcap bitmap, exposed via AT_HWCAP2. */
unsigned long cpu_get_elf_hwcap2(void)
{
	return elf_hwcap[1];
}
3224
static void __init setup_system_capabilities(void)
{
	/*
	 * The system-wide safe feature state has been finalised at this
	 * point: detect the SCOPE_SYSTEM capabilities against it, then
	 * enable everything that is not boot-CPU scope (boot-CPU caps were
	 * already enabled in setup_boot_cpu_capabilities()).
	 */
	update_cpu_capabilities(SCOPE_SYSTEM);
	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
}
3236
/*
 * Final boot-time CPU feature setup: finalise system capabilities,
 * publish the ELF hwcaps, configure SVE/SME vector lengths, and sanity
 * check the cache writeback granule.
 */
void __init setup_cpu_features(void)
{
	u32 cwg;

	setup_system_capabilities();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0()) {
		setup_elf_hwcaps(compat_elf_hwcaps);
		elf_hwcap_fixup();
	}

	if (system_uses_ttbr0_pan())
		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

	sve_setup();
	sme_setup();
	minsigstksz_setup();

	/* From this point on, no more capabilities may be detected. */
	finalize_system_capabilities();

	/*
	 * If the CPU does not report a Cache Writeback Granule, warn that
	 * we fall back to the compile-time ARCH_DMA_MINALIGN assumption.
	 */
	cwg = cache_type_cwg();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming %d\n",
			ARCH_DMA_MINALIGN);
}
3267
/*
 * CPU hotplug 'online' callback used when mismatched 32-bit EL0 support
 * is allowed: track which CPUs can run 32-bit tasks and, on the first
 * detected mismatch, nominate one 32-bit-capable CPU that can never be
 * hot-unplugged.
 */
static int enable_mismatched_32bit_el0(unsigned int cpu)
{
	/*
	 * CPU chosen (once, on first mismatch) to stay online so 32-bit
	 * tasks always have somewhere to run; -1 until nominated.
	 */
	static int lucky_winner = -1;

	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
	bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);

	if (cpu_32bit) {
		cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
		static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
	}

	/* No mismatch against CPU0's capability: nothing more to do. */
	if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
		return 0;

	/* A CPU has already been nominated to stay online. */
	if (lucky_winner >= 0)
		return 0;

	/*
	 * First mismatch detected: pin a 32-bit-capable CPU online (this
	 * one if it is 32-bit capable, otherwise any active one from the
	 * mask), and advertise the compat hwcaps now that 32-bit tasks
	 * can run somewhere.
	 */
	lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
							 cpu_active_mask);
	get_cpu_device(lucky_winner)->offline_disabled = true;
	setup_elf_hwcaps(compat_elf_hwcaps);
	elf_hwcap_fixup();
	pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
		cpu, lucky_winner);
	return 0;
}
3305
/*
 * Late initcall: when mismatched 32-bit EL0 is allowed, allocate the
 * 32-bit-capable CPU mask and register the hotplug callback that
 * maintains it for every CPU that comes online.
 */
static int __init init_32bit_el0_mask(void)
{
	if (!allow_mismatched_32bit_el0)
		return 0;

	if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
		return -ENOMEM;

	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				 "arm64/mismatched_32bit_el0:online",
				 enable_mismatched_32bit_el0, NULL);
}
subsys_initcall_sync(init_32bit_el0_mask);
3319
/*
 * cpu_enable() callback for the CnP capability: re-install TTBR1 with
 * the swapper_pg_dir mapping (via the idmap trampoline).
 */
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}
3324
3325
3326
3327
3328
3329
3330
3331 static inline bool __attribute_const__ is_emulated(u32 id)
3332 {
3333 return (sys_reg_Op0(id) == 0x3 &&
3334 sys_reg_CRn(id) == 0x0 &&
3335 sys_reg_Op1(id) == 0x0 &&
3336 (sys_reg_CRm(id) == 0 ||
3337 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
3338 }
3339
3340
3341
3342
3343
3344 static inline int emulate_id_reg(u32 id, u64 *valp)
3345 {
3346 switch (id) {
3347 case SYS_MIDR_EL1:
3348 *valp = read_cpuid_id();
3349 break;
3350 case SYS_MPIDR_EL1:
3351 *valp = SYS_MPIDR_SAFE_VAL;
3352 break;
3353 case SYS_REVIDR_EL1:
3354
3355 *valp = 0;
3356 break;
3357 default:
3358 return -EINVAL;
3359 }
3360
3361 return 0;
3362 }
3363
/*
 * Emulate an EL0 read of an ID-space system register: CRm==0 registers
 * go through emulate_id_reg(); tracked feature registers return the
 * sanitised user-visible value; anything else reads as zero.
 * Returns 0 on success, -EINVAL for registers we do not emulate.
 */
static int emulate_sys_reg(u32 id, u64 *valp)
{
	struct arm64_ftr_reg *regp;

	if (!is_emulated(id))
		return -EINVAL;

	if (sys_reg_CRm(id) == 0)
		return emulate_id_reg(id, valp);

	regp = get_arm64_ftr_reg_nowarn(id);
	if (regp)
		*valp = arm64_ftr_reg_user_value(regp);
	else
		/*
		 * Untracked registers are either IMPLEMENTATION DEFINED
		 * (e.g. ID_AFR0_EL1) or reserved; emulate them as RAZ.
		 */
		*valp = 0;
	return 0;
}
3385
3386 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
3387 {
3388 int rc;
3389 u64 val;
3390
3391 rc = emulate_sys_reg(sys_reg, &val);
3392 if (!rc) {
3393 pt_regs_write_reg(regs, rt, val);
3394 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
3395 }
3396 return rc;
3397 }
3398
/*
 * Undef-hook handler: decode the system register encoding and the
 * destination register from a trapped MRS instruction and emulate it.
 */
static int emulate_mrs(struct pt_regs *regs, u32 insn)
{
	u32 sys_reg, rt;

	/*
	 * sys_reg values are defined as used in mrs/msr instructions;
	 * shift the 16-bit immediate to recover the full encoding.
	 */
	sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
	rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
	return do_emulate_mrs(regs, sys_reg, rt);
}
3411
/*
 * Undef hook matching EL0 MRS instructions (opcode 0xd538xxxx) so their
 * ID register reads can be emulated with sanitised values.
 */
static struct undef_hook mrs_hook = {
	.instr_mask = 0xffff0000,
	.instr_val = 0xd5380000,
	.pstate_mask = PSR_AA32_MODE_MASK,
	.pstate_val = PSR_MODE_EL0t,
	.fn = emulate_mrs,
};
3419
/* Register the MRS undef hook at core initcall time. */
static int __init enable_mrs_emulation(void)
{
	register_undef_hook(&mrs_hook);
	return 0;
}

core_initcall(enable_mrs_emulation);
3427
3428 enum mitigation_state arm64_get_meltdown_state(void)
3429 {
3430 if (__meltdown_safe)
3431 return SPECTRE_UNAFFECTED;
3432
3433 if (arm64_kernel_unmapped_at_el0())
3434 return SPECTRE_MITIGATED;
3435
3436 return SPECTRE_VULNERABLE;
3437 }
3438
3439 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
3440 char *buf)
3441 {
3442 switch (arm64_get_meltdown_state()) {
3443 case SPECTRE_UNAFFECTED:
3444 return sprintf(buf, "Not affected\n");
3445
3446 case SPECTRE_MITIGATED:
3447 return sprintf(buf, "Mitigation: PTI\n");
3448
3449 default:
3450 return sprintf(buf, "Vulnerable\n");
3451 }
3452 }