/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__

#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt

#include <linux/device.h>
#include <asm/mce.h>

enum severity_level {
    MCE_NO_SEVERITY,
    MCE_DEFERRED_SEVERITY,
    MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
    MCE_KEEP_SEVERITY,
    MCE_SOME_SEVERITY,
    MCE_AO_SEVERITY,
    MCE_UC_SEVERITY,
    MCE_AR_SEVERITY,
    MCE_PANIC_SEVERITY,
};
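
/*
 * Illustrative sketch only, not part of the upstream header: the levels
 * above are defined in increasing order of seriousness, so a plain numeric
 * comparison can pick the worse of two results.  The helper name below is a
 * hypothetical example.
 */
static inline enum severity_level example_worst_severity(enum severity_level a,
                                                         enum severity_level b)
{
    return a > b ? a : b;   /* higher enum value == more severe */
}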

extern struct blocking_notifier_head x86_mce_decoder_chain;

#define INITIAL_CHECK_INTERVAL  (5 * 60) /* 5 minutes */

struct mce_evt_llist {
    struct llist_node llnode;
    struct mce mce;
};

void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);
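
/*
 * Illustrative sketch only, not part of the upstream interface: typical use
 * of the event pool declared above.  A record is queued lock-lessly (e.g.
 * from #MC/atomic context) with mce_gen_pool_add() and drained later from
 * process context by mce_gen_pool_process().  The wrapper name below is a
 * hypothetical example.
 */
static inline int example_queue_mce(struct mce *m)
{
    /* 0 on success, a negative errno if the pool cannot take the record. */
    return mce_gen_pool_add(m);
}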

int mce_severity(struct mce *a, struct pt_regs *regs, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

extern mce_banks_t mce_banks_ce_disabled;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
#else
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif

void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
    return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
    return 0;
}
static inline int apei_check_mce(void)
{
    return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
    return -EINVAL;
}
#endif
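
/*
 * Illustrative sketch only: reading back and discarding one record that was
 * persisted via the APEI interface above.  The wrapper name is a hypothetical
 * example; with !CONFIG_ACPI_APEI the stub versions above make this a no-op.
 */
static inline int example_flush_apei_record(struct mce *m)
{
    u64 record_id;
    ssize_t len = apei_read_mce(m, &record_id);

    if (len <= 0)
        return len;                     /* nothing pending, or error */

    return apei_clear_mce(record_id);   /* drop the consumed record */
}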

/*
 * We consider records to be equivalent if bank+status+addr+misc all match.
 * This is only used when the system is going down because of a fatal error
 * to avoid cluttering the console log with essentially repeated information.
 * In normal processing all errors seen are logged.
 */
static inline bool mce_cmp(struct mce *m1, struct mce *m2)
{
    return m1->bank != m2->bank ||
        m1->status != m2->status ||
        m1->addr != m2->addr ||
        m1->misc != m2->misc;
}
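
/*
 * Illustrative sketch only: note that mce_cmp() returns true when the two
 * records DIFFER (strcmp-style), so a fatal-error path can suppress
 * consecutive duplicates as below.  The helper name is a hypothetical example.
 */
static inline bool example_should_log(struct mce *prev, struct mce *cur)
{
    return !prev || mce_cmp(prev, cur); /* log only if different from last */
}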

extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void)   { }
static inline void mce_register_injector_chain(struct notifier_block *nb)   { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif

struct mca_config {
    __u64 lmce_disabled         : 1,
          disabled              : 1,
          ser                   : 1,
          recovery              : 1,
          bios_cmci_threshold   : 1,
          /* Proper #MC exception handler is set */
          initialized           : 1,
          __reserved            : 58;

    bool dont_log_ce;
    bool cmci_disabled;
    bool ignore_ce;
    bool print_all;

    int monarch_timeout;
    int panic_timeout;
    u32 rip_msr;
    s8 bootlog;
};

extern struct mca_config mca_cfg;
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);

struct mce_vendor_flags {
    /*
     * Indicates that overflow conditions are not fatal, when set.
     */
    __u64 overflow_recov    : 1,

    /*
     * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
     * Recovery. It indicates support for data poisoning in HW and deferred
     * error interrupts.
     */
    succor              : 1,

    /*
     * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
     * the register space for each MCA bank and also increases number of
     * banks. Also, to accommodate the new banks and registers, the MCA
     * register space is moved to a new MSR range.
     */
    smca                : 1,

    /* AMD-style error thresholding banks present. */
    amd_threshold       : 1,

    /* Pentium, family 5-style MCA */
    p5                  : 1,

    /* Centaur Winchip C6-style MCA */
    winchip             : 1,

    /* SandyBridge IFU quirk */
    snb_ifu_quirk       : 1,

    /* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
    skx_repmov_quirk    : 1,

    __reserved_0        : 56;
};

extern struct mce_vendor_flags mce_flags;
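
/*
 * Illustrative sketch only: the vendor flag bits are normally tested
 * directly, e.g. to decide whether the Scalable MCA register layout applies.
 * The helper name is a hypothetical example.
 */
static inline bool example_uses_smca_layout(void)
{
    return mce_flags.smca;
}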

enum mca_msr {
    MCA_CTL,
    MCA_STATUS,
    MCA_ADDR,
    MCA_MISC,
};

/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
#endif

#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
static inline void pentium_machine_check(struct pt_regs *regs) {}
static inline void winchip_machine_check(struct pt_regs *regs) {}
#endif

noinstr u64 mce_rdmsrl(u32 msr);

static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
{
    if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
        switch (reg) {
        case MCA_CTL:    return MSR_AMD64_SMCA_MCx_CTL(bank);
        case MCA_ADDR:   return MSR_AMD64_SMCA_MCx_ADDR(bank);
        case MCA_MISC:   return MSR_AMD64_SMCA_MCx_MISC(bank);
        case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
        }
    }

    switch (reg) {
    case MCA_CTL:    return MSR_IA32_MCx_CTL(bank);
    case MCA_ADDR:   return MSR_IA32_MCx_ADDR(bank);
    case MCA_MISC:   return MSR_IA32_MCx_MISC(bank);
    case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
    }

    return 0;
}
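
/*
 * Illustrative sketch only: reading a bank's status register by combining the
 * two helpers above, which pick the SMCA or legacy MSR number as appropriate.
 * The wrapper name is a hypothetical example.
 */
static inline u64 example_read_bank_status(int bank)
{
    return mce_rdmsrl(mca_msr_reg(bank, MCA_STATUS));
}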

#endif /* __X86_MCE_INTERNAL_H__ */