// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users.
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens.
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters / event selections may be reserved by
 * different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
    /* returns the bit offset of the performance counter register */
    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_HYGON:
    case X86_VENDOR_AMD:
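        /*
         * Fam15h+ core PMU: the CTL/CTR MSRs are interleaved
         * (CTL0, CTR0, CTL1, CTR1, ...), so halving the offset
         * from MSR_F15H_PERF_CTR yields the counter index.
         */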
        if (msr >= MSR_F15H_PERF_CTR)
            return (msr - MSR_F15H_PERF_CTR) >> 1;
        return msr - MSR_K7_PERFCTR0;
    case X86_VENDOR_INTEL:
        if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
            return msr - MSR_ARCH_PERFMON_PERFCTR0;

        switch (boot_cpu_data.x86) {
        case 6:
            return msr - MSR_P6_PERFCTR0;
        case 11:
            return msr - MSR_KNC_PERFCTR0;
        case 15:
            return msr - MSR_P4_BPU_PERFCTR0;
        }
        break;
    case X86_VENDOR_ZHAOXIN:
    case X86_VENDOR_CENTAUR:
        return msr - MSR_ARCH_PERFMON_PERFCTR0;
    }
    return 0;
}

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_HYGON:
    case X86_VENDOR_AMD:
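        /* Fam15h+: CTL MSRs are interleaved with the CTR MSRs; see nmi_perfctr_msr_to_bit() */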
        if (msr >= MSR_F15H_PERF_CTL)
            return (msr - MSR_F15H_PERF_CTL) >> 1;
        return msr - MSR_K7_EVNTSEL0;
    case X86_VENDOR_INTEL:
        if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
            return msr - MSR_ARCH_PERFMON_EVENTSEL0;

        switch (boot_cpu_data.x86) {
        case 6:
            return msr - MSR_P6_EVNTSEL0;
        case 11:
            return msr - MSR_KNC_EVNTSEL0;
        case 15:
            return msr - MSR_P4_BSU_ESCR0;
        }
        break;
    case X86_VENDOR_ZHAOXIN:
    case X86_VENDOR_CENTAUR:
        return msr - MSR_ARCH_PERFMON_EVENTSEL0;
    }
    return 0;
}

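/*
 * Reservation helpers: reserve_*() returns 1 when the caller now owns the
 * register (or when the register is not managed here at all) and 0 when it
 * is already taken; release_*() hands it back.
 */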
int reserve_perfctr_nmi(unsigned int msr)
{
    unsigned int counter;

    counter = nmi_perfctr_msr_to_bit(msr);
    /* register not managed by the allocator? */
    if (counter > NMI_MAX_COUNTER_BITS)
        return 1;

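    /* test_and_set_bit() returns the old bit value: 0 means it was free and is now ours */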
    if (!test_and_set_bit(counter, perfctr_nmi_owner))
        return 1;
    return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
    unsigned int counter;

    counter = nmi_perfctr_msr_to_bit(msr);
    /* register not managed by the allocator? */
    if (counter > NMI_MAX_COUNTER_BITS)
        return;

    clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
    unsigned int counter;

    counter = nmi_evntsel_msr_to_bit(msr);
    /* register not managed by the allocator? */
    if (counter > NMI_MAX_COUNTER_BITS)
        return 1;

    if (!test_and_set_bit(counter, evntsel_nmi_owner))
        return 1;
    return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
    unsigned int counter;

    counter = nmi_evntsel_msr_to_bit(msr);
    /* register not managed by the allocator? */
    if (counter > NMI_MAX_COUNTER_BITS)
        return;

    clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
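
/*
 * Example usage (illustration only, not part of this file): a hypothetical
 * subsystem that wants to program architectural counter 0 itself would first
 * reserve both the event-select and counter MSRs and give them back when
 * done. The helper names below are made up for the sketch; the MSR names
 * assume X86_FEATURE_ARCH_PERFMON.
 */
#if 0   /* illustrative sketch, not built */
static int example_claim_counter0(void)
{
    if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0))
        return -EBUSY;      /* event select already owned */

    if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0)) {
        release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
        return -EBUSY;      /* counter already owned */
    }

    /* ... program the event select and counter here ... */
    return 0;
}

static void example_release_counter0(void)
{
    release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
    release_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0);
}
#endif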