Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Contains CPU specific errata definitions
0004  *
0005  * Copyright (C) 2014 ARM Ltd.
0006  */
0007 
0008 #include <linux/arm-smccc.h>
0009 #include <linux/types.h>
0010 #include <linux/cpu.h>
0011 #include <asm/cpu.h>
0012 #include <asm/cputype.h>
0013 #include <asm/cpufeature.h>
0014 #include <asm/kvm_asm.h>
0015 #include <asm/smp_plat.h>
0016 
0017 static bool __maybe_unused
0018 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
0019 {
0020     const struct arm64_midr_revidr *fix;
0021     u32 midr = read_cpuid_id(), revidr;
0022 
0023     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
0024     if (!is_midr_in_range(midr, &entry->midr_range))
0025         return false;
0026 
0027     midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
0028     revidr = read_cpuid(REVIDR_EL1);
0029     for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
0030         if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
0031             return false;
0032 
0033     return true;
0034 }
0035 
0036 static bool __maybe_unused
0037 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
0038                 int scope)
0039 {
0040     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
0041     return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
0042 }
0043 
0044 static bool __maybe_unused
0045 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
0046 {
0047     u32 model;
0048 
0049     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
0050 
0051     model = read_cpuid_id();
0052     model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
0053          MIDR_ARCHITECTURE_MASK;
0054 
0055     return model == entry->midr_range.model;
0056 }
0057 
0058 static bool
0059 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
0060               int scope)
0061 {
0062     u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
0063     u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
0064     u64 ctr_raw, ctr_real;
0065 
0066     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
0067 
0068     /*
0069      * We want to make sure that all the CPUs in the system expose
0070      * a consistent CTR_EL0 to make sure that applications behaves
0071      * correctly with migration.
0072      *
0073      * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
0074      *
0075      * 1) It is safe if the system doesn't support IDC, as CPU anyway
0076      *    reports IDC = 0, consistent with the rest.
0077      *
0078      * 2) If the system has IDC, it is still safe as we trap CTR_EL0
0079      *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
0080      *
0081      * So, we need to make sure either the raw CTR_EL0 or the effective
0082      * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
0083      */
0084     ctr_raw = read_cpuid_cachetype() & mask;
0085     ctr_real = read_cpuid_effective_cachetype() & mask;
0086 
0087     return (ctr_real != sys) && (ctr_raw != sys);
0088 }
0089 
0090 static void
0091 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
0092 {
0093     u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
0094     bool enable_uct_trap = false;
0095 
0096     /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
0097     if ((read_cpuid_cachetype() & mask) !=
0098         (arm64_ftr_reg_ctrel0.sys_val & mask))
0099         enable_uct_trap = true;
0100 
0101     /* ... or if the system is affected by an erratum */
0102     if (cap->capability == ARM64_WORKAROUND_1542419)
0103         enable_uct_trap = true;
0104 
0105     if (enable_uct_trap)
0106         sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
0107 }
0108 
#ifdef CONFIG_ARM64_ERRATUM_1463225
/*
 * Erratum 1463225 only needs a workaround when the kernel runs at EL2
 * (VHE); otherwise an affected MIDR alone is not enough.
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	bool affected = is_affected_midr_range_list(entry, scope);

	return affected && is_kernel_in_hyp_mode();
}
#endif
0117 
/*
 * Trap EL0 cache maintenance instructions by clearing SCTLR_EL1.UCI;
 * used as the cpu_enable hook for the "clean cache" errata below.
 */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
0123 
/*
 * Helpers for building arm64_cpu_capabilities entries matched by MIDR.
 * The CAP_* macros fill in .matches plus the MIDR match data; the
 * ERRATA_* variants additionally tag the entry as a local-CPU erratum.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* (variant/revision, REVIDR bit) pairs that mark the erratum as fixed */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
/* ThunderX2-family cores: Broadcom Vulcan and Cavium ThunderX2 */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
0166 
0167 static bool __maybe_unused
0168 needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
0169              int scope)
0170 {
0171     int i;
0172 
0173     if (!is_affected_midr_range_list(entry, scope) ||
0174         !is_hyp_mode_available())
0175         return false;
0176 
0177     for_each_possible_cpu(i) {
0178         if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
0179             return true;
0180     }
0181 
0182     return false;
0183 }
0184 
0185 static bool __maybe_unused
0186 has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
0187                 int scope)
0188 {
0189     u32 midr = read_cpuid_id();
0190     bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
0191     const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
0192 
0193     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
0194     return is_midr_in_range(midr, &range) && has_dic;
0195 }
0196 
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/*
 * CPUs that need TLB invalidations repeated to complete. Multi-entry
 * match list: the workaround is enabled if any entry matches.
 */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		/* any Kryo part, matched by is_kryo_midr's masked compare */
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		/* Cortex-A76 r0p0 - r3p0 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif
0226 
#ifdef CONFIG_CAVIUM_ERRATUM_23154
/* ThunderX and OcteonTX2 parts affected by errata 23154/38545 */
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif
0241 
#ifdef CONFIG_CAVIUM_ERRATUM_27456
/*
 * NOTE(review): deliberately not static — presumably referenced from
 * outside this file (e.g. KVM); confirm before changing linkage.
 */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif
0251 
#ifdef CONFIG_CAVIUM_ERRATUM_30115
/* ThunderX passes affected by erratum 30115 */
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
0263 
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 r0p0 plus any Kryo part; multi-entry match list */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
0276 
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
/* Cortex-A53 revisions needing "clean cache" maintenance trapping */
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
0292 
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 * (1418040 supersedes 1188873, hence the wider r0p0-r3p1 ranges)
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
0308 
#ifdef CONFIG_ARM64_ERRATUM_845719
/* Parts affected by erratum 845719 */
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif
0320 
#ifdef CONFIG_ARM64_ERRATUM_843419
/*
 * Multi-entry match list for erratum 843419. The ERRATA_MIDR_* macros
 * already set .matches = is_affected_midr_range (via CAP_MIDR_RANGE),
 * so the previous explicit .matches assignments were duplicate
 * designated initializers and have been dropped.
 */
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* REVIDR bit 8 on r0p4 marks the erratum as fixed */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif
0337 
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
/* Parts affected by the speculative-AT family of errata */
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif
0357 
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Parts affected by erratum 1463225 (see has_cortex_a76_erratum_1463225) */
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
0367 
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
/* Parts whose TRBE misbehaves in overwrite (circular-buffer) fill mode */
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */
0380 
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
/* Parts where a TSB (trace synchronization barrier) may fail to flush */
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
0392 
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
/*
 * Parts whose TRBE may write outside the programmed buffer range.
 * Declared const: this is read-only match data, consistent with the
 * other midr_range lists in this file.
 */
static const struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
0405 
#ifdef CONFIG_ARM64_ERRATUM_1742098
/*
 * Parts with broken AArch32 AES instructions (erratum 1742098):
 * Cortex-A57 from r1p0 onwards and all Cortex-A72 revisions.
 * Made const (read-only match data) and the #endif comment fixed: it
 * previously said CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
 * copy-pasted from the preceding block.
 */
static const struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */
0413 
/*
 * Master table of CPU erratum workarounds, matched against each CPU as
 * it comes online. Terminated by an all-zero sentinel entry. Entries
 * default to ARM64_CPUCAP_LOCAL_CPU_ERRATUM via the ERRATA_* macros
 * unless an explicit .type says otherwise.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		/* overrides the .matches set by the macro above */
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum work around is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
	{
	}	/* sentinel */
};