0001
0002
0003
0004
0005
0006 #ifndef __ASM_CPUFEATURE_H
0007 #define __ASM_CPUFEATURE_H
0008
0009 #include <asm/cpucaps.h>
0010 #include <asm/cputype.h>
0011 #include <asm/hwcap.h>
0012 #include <asm/sysreg.h>
0013
0014 #define MAX_CPU_FEATURES 128
0015 #define cpu_feature(x) KERNEL_HWCAP_ ## x
0016
0017 #ifndef __ASSEMBLY__
0018
0019 #include <linux/bug.h>
0020 #include <linux/jump_label.h>
0021 #include <linux/kernel.h>
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
/*
 * Policy for combining a CPU feature register field across all CPUs
 * into a single system-wide "safe" value.
 */
enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
0043
0044 #define FTR_STRICT true
0045 #define FTR_NONSTRICT false
0046
0047 #define FTR_SIGNED true
0048 #define FTR_UNSIGNED false
0049
0050 #define FTR_VISIBLE true
0051 #define FTR_HIDDEN false
0052
0053 #define FTR_VISIBLE_IF_IS_ENABLED(config) \
0054 (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
0055
/* Describes one field of a CPU feature register and how to sanitise it. */
struct arm64_ftr_bits {
	bool		sign;	/* Value is signed ? */
	bool		visible;	/* Exposed to userspace ? */
	bool		strict;	/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;	/* How to pick the system-wide safe value */
	u8		shift;	/* Field position within the register */
	u8		width;	/* Field width in bits */
	s64		safe_val; /* safe value for FTR_EXACT features */
};
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
/*
 * Override of a feature register value: only the bits set in @mask are
 * replaced by the corresponding bits of @val.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
0087
0088
0089
0090
0091
0092
/* System-wide view of a CPU feature register, built from all CPUs. */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;	/* Bits that must match on every CPU */
	u64				user_mask;	/* Bits exposed to userspace from sys_val */
	u64				sys_val;	/* Sanitised system-wide value */
	u64				user_val;	/* Value presented to userspace */
	struct arm64_ftr_override	*override;	/* Optional boot-time override, may be NULL */
	const struct arm64_ftr_bits	*ftr_bits;	/* Per-field sanitisation descriptors */
};
0102
0103 extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245 #define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
0246 #define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
0247
0248
0249
0250
0251
0252 #define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
0253 #define ARM64_CPUCAP_SCOPE_MASK \
0254 (ARM64_CPUCAP_SCOPE_SYSTEM | \
0255 ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
0256 ARM64_CPUCAP_SCOPE_BOOT_CPU)
0257
0258 #define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
0259 #define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
0260 #define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
0261 #define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
0262
0263
0264
0265
0266
0267 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
0268
0269 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
0270
0271 #define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
0272
0273
0274
0275
0276
0277
0278
0279
0280 #define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
0281 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
0282
0283
0284
0285
0286
0287
0288
0289 #define ARM64_CPUCAP_SYSTEM_FEATURE \
0290 (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
0291
0292
0293
0294
0295
0296
0297 #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
0298 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
0299 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
0300 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
0301
0302
0303
0304
0305
0306
0307 #define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
0308 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
0309 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
0310
0311
0312
0313
0314
0315
0316 #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE \
0317 (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
0318
0319
0320
0321
0322
0323
0324
0325 #define ARM64_CPUCAP_BOOT_CPU_FEATURE \
0326 (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
0327
/* Descriptor for one CPU capability (feature or erratum workaround). */
struct arm64_cpu_capabilities {
	const char *desc;	/* Human-readable description, may be NULL */
	u16 capability;		/* ARM64_* capability number (index into cpu_hwcaps) */
	u16 type;		/* ARM64_CPUCAP_* scope and conflict-policy flags */
	/* Predicate: does this capability apply at the given SCOPE_* ? */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Callback invoked to enable the capability; presumably run on each
	 * CPU for which the capability is detected — TODO confirm against
	 * cpufeature.c.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* Match by MIDR range, with optional fixed revisions */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;	/* Match by MIDR list */
		struct {	/* Match by feature register field */
			u32 sys_reg;		/* Encoding of the register to check */
			u8 field_pos;		/* Field bit position */
			u8 field_width;		/* Field width in bits */
			u8 min_field_value;	/* Minimum value that indicates support */
			u8 hwcap_type;		/* CAP_HWCAP type for ELF hwcaps */
			bool sign;		/* Is the field signed ? */
			unsigned long hwcap;	/* ELF hwcap bit to expose */
		};
	};

	/*
	 * A NULL-terminated list of sub-capabilities; see
	 * cpucap_multi_entry_cap_matches().
	 */
	const struct arm64_cpu_capabilities *match_list;
};
0380
0381 static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
0382 {
0383 return cap->type & ARM64_CPUCAP_SCOPE_MASK;
0384 }
0385
0386
0387
0388
0389
0390
0391 static inline bool
0392 cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
0393 int scope)
0394 {
0395 const struct arm64_cpu_capabilities *caps;
0396
0397 for (caps = entry->match_list; caps->matches; caps++)
0398 if (caps->matches(caps, scope))
0399 return true;
0400
0401 return false;
0402 }
0403
/* True when this code is built into the VHE KVM hypervisor object. */
static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined when compiling the VHE hypervisor objects */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}
0409
/* True when this code is built into the non-VHE (nVHE) KVM hypervisor object. */
static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined when compiling the nVHE hypervisor objects */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}
0415
0416 static __always_inline bool is_hyp_code(void)
0417 {
0418 return is_vhe_hyp_code() || is_nvhe_hyp_code();
0419 }
0420
0421 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
0422 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
0423 extern struct static_key_false arm64_const_caps_ready;
0424
0425
0426 #define ARM64_NPATCHABLE (ARM64_NCAPS + 1)
0427 extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
0428
0429 #define for_each_available_cap(cap) \
0430 for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
0431
0432 bool this_cpu_has_cap(unsigned int cap);
0433 void cpu_set_feature(unsigned int num);
0434 bool cpu_have_feature(unsigned int num);
0435 unsigned long cpu_get_elf_hwcap(void);
0436 unsigned long cpu_get_elf_hwcap2(void);
0437
0438 #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
0439 #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
0440
/* True once boot-time capability detection has completed (arm64_const_caps_ready set). */
static __always_inline bool system_capabilities_finalized(void)
{
	return static_branch_likely(&arm64_const_caps_ready);
}
0445
0446
0447
0448
0449
0450
0451 static inline bool cpus_have_cap(unsigned int num)
0452 {
0453 if (num >= ARM64_NCAPS)
0454 return false;
0455 return test_bit(num, cpu_hwcaps);
0456 }
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466 static __always_inline bool __cpus_have_const_cap(int num)
0467 {
0468 if (num >= ARM64_NCAPS)
0469 return false;
0470 return static_branch_unlikely(&cpu_hwcap_keys[num]);
0471 }
0472
0473
0474
0475
0476
0477
0478
0479
0480
0481 static __always_inline bool cpus_have_final_cap(int num)
0482 {
0483 if (system_capabilities_finalized())
0484 return __cpus_have_const_cap(num);
0485 else
0486 BUG();
0487 }
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500 static __always_inline bool cpus_have_const_cap(int num)
0501 {
0502 if (is_hyp_code())
0503 return cpus_have_final_cap(num);
0504 else if (system_capabilities_finalized())
0505 return __cpus_have_const_cap(num);
0506 else
0507 return cpus_have_cap(num);
0508 }
0509
0510 static inline void cpus_set_cap(unsigned int num)
0511 {
0512 if (num >= ARM64_NCAPS) {
0513 pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
0514 num, ARM64_NCAPS);
0515 } else {
0516 __set_bit(num, cpu_hwcaps);
0517 }
0518 }
0519
/*
 * Extract the @width-bit field starting at bit @field from @features,
 * sign-extending it via a left shift to bit 63 and an arithmetic
 * right shift back down.
 */
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}
0525
/* Signed extraction with the standard 4-bit ID register field width. */
static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}
0531
/*
 * Extract the @width-bit field starting at bit @field from @features
 * as an unsigned value (logical shifts, no sign extension).
 */
static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}
0537
/* Unsigned extraction with the standard 4-bit ID register field width. */
static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
0543
0544
0545
0546
0547
0548
0549 static inline u64 __attribute_const__
0550 cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
0551 {
0552 u64 val = cpuid_feature_extract_unsigned_field(features, field);
0553 u64 mask = GENMASK_ULL(field + 3, field);
0554
0555
0556 if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
0557 val = 0;
0558
0559 if (val > cap) {
0560 features &= ~mask;
0561 features |= (cap << field) & mask;
0562 }
0563
0564 return features;
0565 }
0566
/* Bitmask covering the register field described by @ftrp. */
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}
0571
0572 static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
0573 {
0574 return (reg->user_val | (reg->sys_val & reg->user_mask));
0575 }
0576
0577 static inline int __attribute_const__
0578 cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
0579 {
0580 if (WARN_ON_ONCE(!width))
0581 width = 4;
0582 return (sign) ?
0583 cpuid_feature_extract_signed_field_width(features, field, width) :
0584 cpuid_feature_extract_unsigned_field_width(features, field, width);
0585 }
0586
/* Signed/unsigned extraction with the standard 4-bit field width. */
static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}
0592
/* Extract the field described by @ftrp from register value @val. */
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
0597
0598 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
0599 {
0600 return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
0601 cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
0602 }
0603
0604 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
0605 {
0606 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
0607
0608 return val == ID_AA64PFR0_ELx_32BIT_64BIT;
0609 }
0610
0611 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
0612 {
0613 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
0614
0615 return val == ID_AA64PFR0_ELx_32BIT_64BIT;
0616 }
0617
0618 static inline bool id_aa64pfr0_sve(u64 pfr0)
0619 {
0620 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
0621
0622 return val > 0;
0623 }
0624
0625 static inline bool id_aa64pfr1_sme(u64 pfr1)
0626 {
0627 u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
0628
0629 return val > 0;
0630 }
0631
0632 static inline bool id_aa64pfr1_mte(u64 pfr1)
0633 {
0634 u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
0635
0636 return val >= ID_AA64PFR1_MTE;
0637 }
0638
0639 void __init setup_cpu_features(void);
0640 void check_local_cpu_capabilities(void);
0641
0642 u64 read_sanitised_ftr_reg(u32 id);
0643 u64 __read_sysreg_by_encoding(u32 sys_id);
0644
/* Mixed-endian EL0 check based on this CPU's own ID_AA64MMFR0_EL1. */
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
0649
0650
0651 static inline bool supports_csv2p3(int scope)
0652 {
0653 u64 pfr0;
0654 u8 csv2_val;
0655
0656 if (scope == SCOPE_LOCAL_CPU)
0657 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
0658 else
0659 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
0660
0661 csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
0662 ID_AA64PFR0_CSV2_SHIFT);
0663 return csv2_val == 3;
0664 }
0665
0666 static inline bool supports_clearbhb(int scope)
0667 {
0668 u64 isar2;
0669
0670 if (scope == SCOPE_LOCAL_CPU)
0671 isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
0672 else
0673 isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
0674
0675 return cpuid_feature_extract_unsigned_field(isar2,
0676 ID_AA64ISAR2_EL1_BC_SHIFT);
0677 }
0678
0679 const struct cpumask *system_32bit_el0_cpumask(void);
0680 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
0681
0682 static inline bool system_supports_32bit_el0(void)
0683 {
0684 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
0685
0686 return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
0687 id_aa64pfr0_32bit_el0(pfr0);
0688 }
0689
0690 static inline bool system_supports_4kb_granule(void)
0691 {
0692 u64 mmfr0;
0693 u32 val;
0694
0695 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
0696 val = cpuid_feature_extract_unsigned_field(mmfr0,
0697 ID_AA64MMFR0_TGRAN4_SHIFT);
0698
0699 return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
0700 (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
0701 }
0702
0703 static inline bool system_supports_64kb_granule(void)
0704 {
0705 u64 mmfr0;
0706 u32 val;
0707
0708 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
0709 val = cpuid_feature_extract_unsigned_field(mmfr0,
0710 ID_AA64MMFR0_TGRAN64_SHIFT);
0711
0712 return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
0713 (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
0714 }
0715
0716 static inline bool system_supports_16kb_granule(void)
0717 {
0718 u64 mmfr0;
0719 u32 val;
0720
0721 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
0722 val = cpuid_feature_extract_unsigned_field(mmfr0,
0723 ID_AA64MMFR0_TGRAN16_SHIFT);
0724
0725 return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
0726 (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
0727 }
0728
/* Mixed-endian EL0 check based on the sanitised system-wide ID_AA64MMFR0. */
static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}
0733
0734 static inline bool system_supports_mixed_endian(void)
0735 {
0736 u64 mmfr0;
0737 u32 val;
0738
0739 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
0740 val = cpuid_feature_extract_unsigned_field(mmfr0,
0741 ID_AA64MMFR0_BIGENDEL_SHIFT);
0742
0743 return val == 0x1;
0744 }
0745
/* FP/SIMD is usable: the "negative" ARM64_HAS_NO_FPSIMD cap is not set. */
static __always_inline bool system_supports_fpsimd(void)
{
	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
0750
/* Hardware PAN in use: CONFIG_ARM64_PAN enabled and ARM64_HAS_PAN detected. */
static inline bool system_uses_hw_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_PAN) &&
		cpus_have_const_cap(ARM64_HAS_PAN);
}
0756
/* Software TTBR0 PAN in use: configured, and hardware PAN is not in use. */
static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!system_uses_hw_pan();
}
0762
/* SVE usable: CONFIG_ARM64_SVE enabled and the ARM64_SVE capability set. */
static __always_inline bool system_supports_sve(void)
{
	return IS_ENABLED(CONFIG_ARM64_SVE) &&
		cpus_have_const_cap(ARM64_SVE);
}
0768
/* SME usable: CONFIG_ARM64_SME enabled and the ARM64_SME capability set. */
static __always_inline bool system_supports_sme(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME);
}
0774
/* SME FA64 usable: CONFIG_ARM64_SME enabled and ARM64_SME_FA64 detected. */
static __always_inline bool system_supports_fa64(void)
{
	return IS_ENABLED(CONFIG_ARM64_SME) &&
		cpus_have_const_cap(ARM64_SME_FA64);
}
0780
/* TPIDR2 follows SME support. */
static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}
0785
/* CnP usable: CONFIG_ARM64_CNP enabled and ARM64_HAS_CNP detected. */
static __always_inline bool system_supports_cnp(void)
{
	return IS_ENABLED(CONFIG_ARM64_CNP) &&
		cpus_have_const_cap(ARM64_HAS_CNP);
}
0791
/* Pointer-auth (address) usable: configured and ARM64_HAS_ADDRESS_AUTH set. */
static inline bool system_supports_address_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH);
}
0797
/* Pointer-auth (generic) usable: configured and ARM64_HAS_GENERIC_AUTH set. */
static inline bool system_supports_generic_auth(void)
{
	return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
0803
/* Both address and generic pointer authentication are supported. */
static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}
0808
/* IRQ priority masking (pseudo-NMI) in use: configured and cap detected. */
static __always_inline bool system_uses_irq_prio_masking(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
}
0814
/* MTE usable: CONFIG_ARM64_MTE enabled and the ARM64_MTE capability set. */
static inline bool system_supports_mte(void)
{
	return IS_ENABLED(CONFIG_ARM64_MTE) &&
		cpus_have_const_cap(ARM64_MTE);
}
0820
/* Priority-mask debugging: debug config enabled and prio masking in use. */
static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}
0826
/* BTI usable: CONFIG_ARM64_BTI enabled and the ARM64_BTI capability set. */
static inline bool system_supports_bti(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI) && cpus_have_const_cap(ARM64_BTI);
}
0831
/* TLB range ops usable: configured and ARM64_HAS_TLB_RANGE detected. */
static inline bool system_supports_tlb_range(void)
{
	return IS_ENABLED(CONFIG_ARM64_TLB_RANGE) &&
		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
}
0837
0838 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
0839
/*
 * Translate an ID_AA64MMFR0.PARange encoding into the number of
 * physical address bits it represents.
 */
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_PARANGE_32: return 32;
	case ID_AA64MMFR0_PARANGE_36: return 36;
	case ID_AA64MMFR0_PARANGE_40: return 40;
	case ID_AA64MMFR0_PARANGE_44: return 44;
	case ID_AA64MMFR0_PARANGE_48: return 48;
	case ID_AA64MMFR0_PARANGE_52: return 52;
	/*
	 * Unknown (e.g. future architecture) encodings fall back to the
	 * kernel's configured PA size.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
0860
0861
/*
 * This CPU has hardware Access Flag management (HAFDBS), and the kernel
 * is configured to use it. Reads this CPU's own ID_AA64MMFR1_EL1, so it
 * is a local (not system-wide) check.
 */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_HADBS_SHIFT);
}
0873
/* This CPU implements PAN, per its own ID_AA64MMFR1_EL1 (local check). */
static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_PAN_SHIFT);
}
0880
0881 #ifdef CONFIG_ARM64_AMU_EXTN
0882
0883 extern bool cpu_has_amu_feat(int cpu);
0884 #else
/* Stub when CONFIG_ARM64_AMU_EXTN is disabled: no CPU has the AMU. */
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
0889 #endif
0890
0891
0892 extern int get_cpu_with_amu_feat(void);
0893
0894 static inline unsigned int get_vmid_bits(u64 mmfr1)
0895 {
0896 int vmid_bits;
0897
0898 vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
0899 ID_AA64MMFR1_VMIDBITS_SHIFT);
0900 if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
0901 return 16;
0902
0903
0904
0905
0906
0907 return 8;
0908 }
0909
0910 extern struct arm64_ftr_override id_aa64mmfr1_override;
0911 extern struct arm64_ftr_override id_aa64pfr0_override;
0912 extern struct arm64_ftr_override id_aa64pfr1_override;
0913 extern struct arm64_ftr_override id_aa64zfr0_override;
0914 extern struct arm64_ftr_override id_aa64smfr0_override;
0915 extern struct arm64_ftr_override id_aa64isar1_override;
0916 extern struct arm64_ftr_override id_aa64isar2_override;
0917
0918 u32 get_kvm_ipa_limit(void);
0919 void dump_cpu_features(void);
0920
0921 #endif
0922
0923 #endif