0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #define pr_fmt(fmt) "ACPI: " fmt
0013
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/init.h>
0017 #include <linux/cpufreq.h>
0018 #include <linux/slab.h>
0019 #include <linux/acpi.h>
0020 #include <acpi/processor.h>
0021 #ifdef CONFIG_X86
0022 #include <asm/cpufeature.h>
0023 #endif
0024
0025 #define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
0026
0027 static DEFINE_MUTEX(performance_mutex);
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045 static int ignore_ppc = -1;
0046 module_param(ignore_ppc, int, 0644);
0047 MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
0048 "limited by BIOS, this should help");
0049
0050 static bool acpi_processor_ppc_in_use;
0051
/*
 * acpi_processor_get_platform_limit - Evaluate _PPC and apply its limit.
 * @pr: ACPI processor object to query.
 *
 * Reads the _PPC (Performance Present Capabilities) object, records the
 * resulting performance-state index in @pr->performance_platform_limit and
 * updates the perflib frequency QoS request to match.
 *
 * Returns 0 on success (including when _PPC is absent, in which case the
 * limit is treated as 0), -EINVAL for a NULL @pr, and -ENODEV when _PPC
 * exists but fails to evaluate.
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC returns the index of the highest performance state the
	 * platform currently allows (0 means no limit).  If the object is
	 * absent, ppc stays 0 and no limit is applied.
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
	if (status != AE_NOT_FOUND) {
		/* _PPC is present even if its evaluation just failed. */
		acpi_processor_ppc_in_use = true;

		if (ACPI_FAILURE(status)) {
			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
			return -ENODEV;
		}
	}

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 (int)ppc, ppc ? "" : "not");

	pr->performance_platform_limit = (int)ppc;

	/*
	 * Only touch the QoS request when the index lies within the known
	 * state table and the request has actually been initialized (it is
	 * added in acpi_processor_ppc_init()).
	 */
	if (ppc >= pr->performance->state_count ||
	    unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/* core_frequency is in MHz; freq_qos works in kHz. */
	ret = freq_qos_update_request(&pr->perflib_req,
			pr->performance->states[ppc].core_frequency * 1000);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
0093
0094 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
0095
0096
0097
0098
0099
0100
0101
0102 static void acpi_processor_ppc_ost(acpi_handle handle, int status)
0103 {
0104 if (acpi_has_method(handle, "_OST"))
0105 acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
0106 status, NULL);
0107 }
0108
0109 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
0110 {
0111 int ret;
0112
0113 if (ignore_ppc || !pr->performance) {
0114
0115
0116
0117
0118 if (event_flag)
0119 acpi_processor_ppc_ost(pr->handle, 1);
0120 return;
0121 }
0122
0123 ret = acpi_processor_get_platform_limit(pr);
0124
0125
0126
0127
0128 if (event_flag) {
0129 if (ret < 0)
0130 acpi_processor_ppc_ost(pr->handle, 1);
0131 else
0132 acpi_processor_ppc_ost(pr->handle, 0);
0133 }
0134 if (ret >= 0)
0135 cpufreq_update_limits(pr->id);
0136 }
0137
0138 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
0139 {
0140 struct acpi_processor *pr;
0141
0142 pr = per_cpu(processors, cpu);
0143 if (!pr || !pr->performance || !pr->performance->state_count)
0144 return -ENODEV;
0145 *limit = pr->performance->states[pr->performance_platform_limit].
0146 core_frequency * 1000;
0147 return 0;
0148 }
0149 EXPORT_SYMBOL(acpi_processor_get_bios_limit);
0150
/*
 * acpi_processor_ignore_ppc_init - Resolve the ignore_ppc module parameter.
 *
 * If the user did not set ignore_ppc (it is still negative), default to
 * honoring _PPC limits.
 */
void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}
0156
0157 void acpi_processor_ppc_init(struct cpufreq_policy *policy)
0158 {
0159 unsigned int cpu;
0160
0161 for_each_cpu(cpu, policy->related_cpus) {
0162 struct acpi_processor *pr = per_cpu(processors, cpu);
0163 int ret;
0164
0165 if (!pr)
0166 continue;
0167
0168 ret = freq_qos_add_request(&policy->constraints,
0169 &pr->perflib_req,
0170 FREQ_QOS_MAX, INT_MAX);
0171 if (ret < 0)
0172 pr_err("Failed to add freq constraint for CPU%d (%d)\n",
0173 cpu, ret);
0174 }
0175 }
0176
0177 void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
0178 {
0179 unsigned int cpu;
0180
0181 for_each_cpu(cpu, policy->related_cpus) {
0182 struct acpi_processor *pr = per_cpu(processors, cpu);
0183
0184 if (pr)
0185 freq_qos_remove_request(&pr->perflib_req);
0186 }
0187 }
0188
0189 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
0190 {
0191 int result = 0;
0192 acpi_status status = 0;
0193 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
0194 union acpi_object *pct = NULL;
0195 union acpi_object obj = { 0 };
0196
0197 status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
0198 if (ACPI_FAILURE(status)) {
0199 acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
0200 return -ENODEV;
0201 }
0202
0203 pct = (union acpi_object *)buffer.pointer;
0204 if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
0205 || (pct->package.count != 2)) {
0206 pr_err("Invalid _PCT data\n");
0207 result = -EFAULT;
0208 goto end;
0209 }
0210
0211
0212
0213
0214
0215 obj = pct->package.elements[0];
0216
0217 if ((obj.type != ACPI_TYPE_BUFFER)
0218 || (obj.buffer.length < sizeof(struct acpi_pct_register))
0219 || (obj.buffer.pointer == NULL)) {
0220 pr_err("Invalid _PCT data (control_register)\n");
0221 result = -EFAULT;
0222 goto end;
0223 }
0224 memcpy(&pr->performance->control_register, obj.buffer.pointer,
0225 sizeof(struct acpi_pct_register));
0226
0227
0228
0229
0230
0231 obj = pct->package.elements[1];
0232
0233 if ((obj.type != ACPI_TYPE_BUFFER)
0234 || (obj.buffer.length < sizeof(struct acpi_pct_register))
0235 || (obj.buffer.pointer == NULL)) {
0236 pr_err("Invalid _PCT data (status_register)\n");
0237 result = -EFAULT;
0238 goto end;
0239 }
0240
0241 memcpy(&pr->performance->status_register, obj.buffer.pointer,
0242 sizeof(struct acpi_pct_register));
0243
0244 end:
0245 kfree(buffer.pointer);
0246
0247 return result;
0248 }
0249
#ifdef CONFIG_X86
/*
 * amd_fixup_frequency - Correct _PSS core frequencies on affected AMD CPUs.
 * @px: performance state whose core_frequency may be recomputed.
 * @i: state index (unused; kept for the non-x86 stub's signature).
 *
 * On AMD family 0x10 (models < 10) and family 0x11, recompute the core
 * frequency from the hardware P-state definition MSR selected by the low
 * three bits of the _PSS control value, since the BIOS-reported value can
 * be wrong.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * Only recompute when the P-state entry is enabled
		 * (PstateEn, bit 63 of the MSR == bit 31 of 'hi').
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
/* Non-x86 stub: nothing to fix up.  (Dropped the stray ';' after the body.) */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif
0284
0285 static int acpi_processor_get_performance_states(struct acpi_processor *pr)
0286 {
0287 int result = 0;
0288 acpi_status status = AE_OK;
0289 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
0290 struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
0291 struct acpi_buffer state = { 0, NULL };
0292 union acpi_object *pss = NULL;
0293 int i;
0294 int last_invalid = -1;
0295
0296 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
0297 if (ACPI_FAILURE(status)) {
0298 acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
0299 return -ENODEV;
0300 }
0301
0302 pss = buffer.pointer;
0303 if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
0304 pr_err("Invalid _PSS data\n");
0305 result = -EFAULT;
0306 goto end;
0307 }
0308
0309 acpi_handle_debug(pr->handle, "Found %d performance states\n",
0310 pss->package.count);
0311
0312 pr->performance->state_count = pss->package.count;
0313 pr->performance->states =
0314 kmalloc_array(pss->package.count,
0315 sizeof(struct acpi_processor_px),
0316 GFP_KERNEL);
0317 if (!pr->performance->states) {
0318 result = -ENOMEM;
0319 goto end;
0320 }
0321
0322 for (i = 0; i < pr->performance->state_count; i++) {
0323
0324 struct acpi_processor_px *px = &(pr->performance->states[i]);
0325
0326 state.length = sizeof(struct acpi_processor_px);
0327 state.pointer = px;
0328
0329 acpi_handle_debug(pr->handle, "Extracting state %d\n", i);
0330
0331 status = acpi_extract_package(&(pss->package.elements[i]),
0332 &format, &state);
0333 if (ACPI_FAILURE(status)) {
0334 acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
0335 acpi_format_exception(status));
0336 result = -EFAULT;
0337 kfree(pr->performance->states);
0338 goto end;
0339 }
0340
0341 amd_fixup_frequency(px, i);
0342
0343 acpi_handle_debug(pr->handle,
0344 "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
0345 i,
0346 (u32) px->core_frequency,
0347 (u32) px->power,
0348 (u32) px->transition_latency,
0349 (u32) px->bus_master_latency,
0350 (u32) px->control, (u32) px->status);
0351
0352
0353
0354
0355 if (!px->core_frequency ||
0356 ((u32)(px->core_frequency * 1000) !=
0357 (px->core_frequency * 1000))) {
0358 pr_err(FW_BUG
0359 "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
0360 pr->id, px->core_frequency);
0361 if (last_invalid == -1)
0362 last_invalid = i;
0363 } else {
0364 if (last_invalid != -1) {
0365
0366
0367
0368 memcpy(&(pr->performance->states[last_invalid]),
0369 px, sizeof(struct acpi_processor_px));
0370 ++last_invalid;
0371 }
0372 }
0373 }
0374
0375 if (last_invalid == 0) {
0376 pr_err(FW_BUG
0377 "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
0378 result = -EFAULT;
0379 kfree(pr->performance->states);
0380 pr->performance->states = NULL;
0381 }
0382
0383 if (last_invalid > 0)
0384 pr->performance->state_count = last_invalid;
0385
0386 end:
0387 kfree(buffer.pointer);
0388
0389 return result;
0390 }
0391
0392 int acpi_processor_get_performance_info(struct acpi_processor *pr)
0393 {
0394 int result = 0;
0395
0396 if (!pr || !pr->performance || !pr->handle)
0397 return -EINVAL;
0398
0399 if (!acpi_has_method(pr->handle, "_PCT")) {
0400 acpi_handle_debug(pr->handle,
0401 "ACPI-based processor performance control unavailable\n");
0402 return -ENODEV;
0403 }
0404
0405 result = acpi_processor_get_performance_control(pr);
0406 if (result)
0407 goto update_bios;
0408
0409 result = acpi_processor_get_performance_states(pr);
0410 if (result)
0411 goto update_bios;
0412
0413
0414 if (ignore_ppc != 1)
0415 result = acpi_processor_get_platform_limit(pr);
0416
0417 return result;
0418
0419
0420
0421
0422
0423 update_bios:
0424 #ifdef CONFIG_X86
0425 if (acpi_has_method(pr->handle, "_PPC")) {
0426 if(boot_cpu_has(X86_FEATURE_EST))
0427 pr_warn(FW_BUG "BIOS needs update for CPU "
0428 "frequency support\n");
0429 }
0430 #endif
0431 return result;
0432 }
0433 EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
0434
0435 int acpi_processor_pstate_control(void)
0436 {
0437 acpi_status status;
0438
0439 if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
0440 return 0;
0441
0442 pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
0443 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);
0444
0445 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
0446 (u32)acpi_gbl_FADT.pstate_control, 8);
0447 if (ACPI_SUCCESS(status))
0448 return 1;
0449
0450 pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
0451 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
0452 acpi_format_exception(status));
0453 return -EIO;
0454 }
0455
/*
 * acpi_processor_notify_smm - Tell the platform firmware (once) that the
 * OS is taking over P-state control.
 * @calling_module: module that will perform frequency transitions.
 *
 * Issues the FADT pstate_control SMI command the first time it is called
 * and caches the outcome in @is_done; later callers just get the cached
 * result.  While _PPC is in use, a reference to @calling_module is kept
 * so it cannot be unloaded.
 *
 * Returns 0 on success (or when there is nothing to do), -EBUSY before
 * cpufreq init, -EINVAL when the module reference cannot be taken, or the
 * (cached) SMI error.
 */
int acpi_processor_notify_smm(struct module *calling_module)
{
	/* 0: not attempted yet, >0: succeeded, <0: failed with that errno. */
	static int is_done;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * Replay the cached outcome of a previous attempt.
	 * NOTE(review): is_done is read and written without a lock here;
	 * this looks racy if two drivers call in concurrently — confirm
	 * callers are serialized.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	/* Assume failure until the SMI write succeeds. */
	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		pr_debug("No SMI port or pstate_control\n");
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	is_done = 1;

	/* Keep the module pinned only while _PPC limits are in use. */
	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
0504
0505 int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
0506 {
0507 int result = 0;
0508 acpi_status status = AE_OK;
0509 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
0510 struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
0511 struct acpi_buffer state = {0, NULL};
0512 union acpi_object *psd = NULL;
0513
0514 status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
0515 if (ACPI_FAILURE(status)) {
0516 return -ENODEV;
0517 }
0518
0519 psd = buffer.pointer;
0520 if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
0521 pr_err("Invalid _PSD data\n");
0522 result = -EFAULT;
0523 goto end;
0524 }
0525
0526 if (psd->package.count != 1) {
0527 pr_err("Invalid _PSD data\n");
0528 result = -EFAULT;
0529 goto end;
0530 }
0531
0532 state.length = sizeof(struct acpi_psd_package);
0533 state.pointer = pdomain;
0534
0535 status = acpi_extract_package(&(psd->package.elements[0]),
0536 &format, &state);
0537 if (ACPI_FAILURE(status)) {
0538 pr_err("Invalid _PSD data\n");
0539 result = -EFAULT;
0540 goto end;
0541 }
0542
0543 if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
0544 pr_err("Unknown _PSD:num_entries\n");
0545 result = -EFAULT;
0546 goto end;
0547 }
0548
0549 if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
0550 pr_err("Unknown _PSD:revision\n");
0551 result = -EFAULT;
0552 goto end;
0553 }
0554
0555 if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
0556 pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
0557 pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
0558 pr_err("Invalid _PSD:coord_type\n");
0559 result = -EFAULT;
0560 goto end;
0561 }
0562 end:
0563 kfree(buffer.pointer);
0564 return result;
0565 }
0566 EXPORT_SYMBOL(acpi_processor_get_psd);
0567
/*
 * acpi_processor_preregister_performance - Discover P-state sharing domains.
 * @performance: per-CPU performance structures supplied by the caller.
 *
 * Evaluates _PSD for every possible CPU and fills in each CPU's
 * shared_cpu_map and shared_type so cpufreq knows which CPUs must
 * coordinate frequency transitions.  pr->performance is only borrowed
 * during the walk and is cleared again before returning.  On error every
 * CPU is reset to a one-CPU CPUFREQ_SHARED_TYPE_NONE domain.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when
 * another driver already registered, or -EINVAL on bad/mismatched _PSD
 * data.
 */
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * First pass: verify no CPU is already registered and that the
	 * caller supplied a valid per-CPU pointer for each one, before we
	 * touch any pr->performance.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Second pass: call _PSD for all CPUs. */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			/*
			 * Record the failure but keep iterating so every
			 * CPU's pr->performance is set before err_ret
			 * cleans them all up.
			 */
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Third pass: with _PSD data for all CPUs in hand, build each
	 * domain's shared_cpu_map and shared_type.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Skip CPUs already placed in a domain by an earlier lead CPU. */
		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		/* Collect all other CPUs in the same _PSD domain. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * All CPUs in one domain must agree on processor
			 * count and coordination type.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		/* Propagate the lead CPU's map/type to the domain members. */
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
				pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL;
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
0718
0719 int
0720 acpi_processor_register_performance(struct acpi_processor_performance
0721 *performance, unsigned int cpu)
0722 {
0723 struct acpi_processor *pr;
0724
0725 if (!acpi_processor_cpufreq_init)
0726 return -EINVAL;
0727
0728 mutex_lock(&performance_mutex);
0729
0730 pr = per_cpu(processors, cpu);
0731 if (!pr) {
0732 mutex_unlock(&performance_mutex);
0733 return -ENODEV;
0734 }
0735
0736 if (pr->performance) {
0737 mutex_unlock(&performance_mutex);
0738 return -EBUSY;
0739 }
0740
0741 WARN_ON(!performance);
0742
0743 pr->performance = performance;
0744
0745 if (acpi_processor_get_performance_info(pr)) {
0746 pr->performance = NULL;
0747 mutex_unlock(&performance_mutex);
0748 return -EIO;
0749 }
0750
0751 mutex_unlock(&performance_mutex);
0752 return 0;
0753 }
0754
0755 EXPORT_SYMBOL(acpi_processor_register_performance);
0756
0757 void acpi_processor_unregister_performance(unsigned int cpu)
0758 {
0759 struct acpi_processor *pr;
0760
0761 mutex_lock(&performance_mutex);
0762
0763 pr = per_cpu(processors, cpu);
0764 if (!pr) {
0765 mutex_unlock(&performance_mutex);
0766 return;
0767 }
0768
0769 if (pr->performance)
0770 kfree(pr->performance->states);
0771 pr->performance = NULL;
0772
0773 mutex_unlock(&performance_mutex);
0774
0775 return;
0776 }
0777
0778 EXPORT_SYMBOL(acpi_processor_unregister_performance);