/*
 * x86 support for ACPI processor C-states entered through the FFH
 * (Functional Fixed Hardware) interface, i.e. MONITOR/MWAIT, plus
 * per-vendor bus master check/control (bm_check/bm_control) handling
 * used when entering C3-type states.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_check/bm_control in @flags based on CPU vendor, family
 * and model.  On SMP it depends on the cache configuration:
 *  - when the cache is not shared among all CPUs, the cache is flushed
 *    before entering C3;
 *  - when the cache is shared among all CPUs, the bm_check mechanism
 *    is used as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache, and
		 * caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state on
	 * P4, Core and beyond CPUs.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
		flags->bm_control = 0;

	/*
	 * For all recent Centaur CPUs, the ucode will make sure that each
	 * core can keep cache coherence with each other while entering C3
	 * type state.  So, set bm_check to 1 to indicate that the kernel
	 * doesn't need to execute a cache flush operation (WBINVD) when
	 * entering C3 type state.
	 */
	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
		    c->x86_stepping >= 0x0e))
			flags->bm_check = 1;
	}

	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		/*
		 * All Zhaoxin CPUs that support C3 share cache, and
		 * caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
		/*
		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
		 * So, set bm_control to zero to indicate that ARB_DISABLE
		 * is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) {
		/*
		 * For all AMD Zen or newer CPUs that support C3, caches
		 * should not be flushed by software while entering C3
		 * type state.  Set bm_check to 1 so that the kernel
		 * doesn't need to execute a cache flush operation.
		 */
		flags->bm_check = 1;
		/*
		 * In the current AMD C-state implementation ARB_DIS is
		 * no longer used.  So set bm_control to zero to indicate
		 * ARB_DIS is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/*
 * The code below handles C-state entry with the MONITOR/MWAIT instruction
 * pair, as described by FFH (Functional Fixed Hardware) _CST entries.
 */

struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

/* FFH GAS bit_offset value used for native MWAIT C-states deeper than C1 */
#define NATIVE_CSTATE_BEYOND_HALT	(2)
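
/*
 * Runs on the target CPU: check CPUID leaf 5 (MONITOR/MWAIT) to see
 * whether the MWAIT hint from the _CST FFH entry (cx->address) names a
 * C-state with at least one supported sub-state and whether the
 * interrupt-break-event extension is available.
 */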
static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type, not the ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (in _CST) is supported or not */
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	/* If the HW does not support any sub-states in this C-state */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}
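
/*
 * Validate a _CST FFH (MWAIT) C-state for @cpu and, on success, cache
 * the MWAIT hint (EAX) and extension flags (ECX) for it in the per-CPU
 * table consumed by acpi_processor_ffh_cstate_enter().
 */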
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on the right CPU */
	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
			     false);
	if (retval == 0) {
		/* Use the hint in _CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, bit 1 of the GAS access_size field
	 * indicates whether BM_STS should be checked before entering
	 * this C-state; if it is clear, skip the check.
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
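
/*
 * Called from the ACPI processor idle path to enter an FFH C-state:
 * idle using the MWAIT hints cached for this CPU by the probe above.
 */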
void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
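
/*
 * Allocate the per-CPU MWAIT hint table at arch_initcall time on CPU
 * vendors known to implement FFH C-states via MONITOR/MWAIT.
 */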
static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);