// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the cpufreq layer and governors. See
 * cppc_acpi.c for the CPPC-specific ACPI methods.
 *
 * Author: Ashwin Chaugule
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/arch_topology.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/irq_work.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry. */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency. */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * This list contains information parsed from per-CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest performance states, desired
 * performance, etc. Depending on the shared_type, not all the entries are
 * updated per CPU.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE

/* Frequency invariance support */
struct cppc_freq_invariance {
	int cpu;
	struct irq_work irq_work;
	struct kthread_work work;
	struct cppc_perf_fb_ctrs prev_perf_fb_ctrs;
	struct cppc_cpudata *cpu_data;
};

static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;

static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1);

/**
 * cppc_scale_freq_workfn - CPPC arch_freq_scale updater for frequency invariance
 * @work: The work item.
 *
 * The CPPC driver registers itself with the topology core to provide its own
 * implementation (cppc_scale_freq_tick()) of topology_scale_freq_tick(), which
 * gets called by the scheduler on every tick.
 *
 * Note that the arch-specific counters have higher priority than CPPC counters
 * if available; the CPPC counters are only used as a fallback when the former
 * are not present.
 *
 * On an invocation of cppc_scale_freq_tick() we schedule an irq_work (since we
 * reach there from hard-irq context), which then queues a normal kthread work
 * item, and cppc_scale_freq_workfn() updates the per-CPU arch_freq_scale
 * variable based on the counter updates since the last tick.
 */
static void cppc_scale_freq_workfn(struct kthread_work *work)
{
	struct cppc_freq_invariance *cppc_fi;
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	struct cppc_cpudata *cpu_data;
	unsigned long local_freq_scale;
	u64 perf;

	cppc_fi = container_of(work, struct cppc_freq_invariance, work);
	cpu_data = cppc_fi->cpu_data;

	if (cppc_get_perf_ctrs(cppc_fi->cpu, &fb_ctrs)) {
		pr_warn("%s: failed to read perf counters\n", __func__);
		return;
	}

	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
				     &fb_ctrs);
	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;

	perf <<= SCHED_CAPACITY_SHIFT;
	local_freq_scale = div64_u64(perf, cpu_data->perf_caps.highest_perf);

	/* The delivered perf may briefly exceed highest_perf; clamp to SCHED_CAPACITY_SCALE. */
	if (unlikely(local_freq_scale > 1024))
		local_freq_scale = 1024;

	per_cpu(arch_freq_scale, cppc_fi->cpu) = local_freq_scale;
}
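
/*
 * Worked example of the scale computation above (illustrative numbers, not
 * from any particular platform): if highest_perf is 300 and the counters
 * indicate a delivered performance of 225 since the last tick, then
 *
 *   local_freq_scale = (225 << SCHED_CAPACITY_SHIFT) / 300
 *                    = (225 * 1024) / 300 = 768
 *
 * i.e. the scheduler sees this CPU as currently running at 75% of its
 * maximum capacity.
 */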

static void cppc_irq_work(struct irq_work *irq_work)
{
	struct cppc_freq_invariance *cppc_fi;

	cppc_fi = container_of(irq_work, struct cppc_freq_invariance, irq_work);
	kthread_queue_work(kworker_fie, &cppc_fi->work);
}

static void cppc_scale_freq_tick(void)
{
	struct cppc_freq_invariance *cppc_fi = &per_cpu(cppc_freq_inv, smp_processor_id());

	/*
	 * cppc_get_perf_ctrs() can potentially sleep, so defer the counter
	 * read to a kthread via irq_work instead of doing it from tick
	 * (hard-irq) context.
	 */
	irq_work_queue(&cppc_fi->irq_work);
}

static struct scale_freq_data cppc_sftd = {
	.source		= SCALE_FREQ_SOURCE_CPPC,
	.set_freq_scale	= cppc_scale_freq_tick,
};

static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu, ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	for_each_cpu(cpu, policy->cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		cppc_fi->cpu = cpu;
		cppc_fi->cpu_data = policy->driver_data;
		kthread_init_work(&cppc_fi->work, cppc_scale_freq_workfn);
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);

		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
		if (ret) {
			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
				__func__, cpu, ret);

			/*
			 * Don't abort if the CPU was offline while the driver
			 * was getting registered.
			 */
			if (cpu_online(cpu))
				return;
		}
	}

	/* Register for freq-invariance */
	topology_set_scale_freq_source(&cppc_sftd, policy->cpus);
}

/*
 * Tear down frequency invariance for all CPUs of the policy: stop using the
 * CPPC counters as the frequency-scale source and make sure no irq_work or
 * kthread work item is still in flight before the per-CPU state goes away.
 */
static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
	struct cppc_freq_invariance *cppc_fi;
	int cpu;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	/* policy->cpus will be empty here, use related_cpus instead */
	topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_CPPC, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		cppc_fi = &per_cpu(cppc_freq_inv, cpu);
		irq_work_sync(&cppc_fi->irq_work);
		kthread_cancel_work_sync(&cppc_fi->work);
	}
}

static void __init cppc_freq_invariance_init(void)
{
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Run the FIE kworker as a SCHED_DEADLINE task with a 1 ms
		 * runtime budget every 10 ms, so the counter updates are not
		 * starved by other load.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	int ret;

	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kworker_fie = kthread_create_worker(0, "cppc_fie");
	if (IS_ERR(kworker_fie))
		return;

	ret = sched_setattr_nocheck(kworker_fie->task, &attr);
	if (ret) {
		pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
			ret);
		kthread_destroy_worker(kworker_fie);
		return;
	}
}

static void cppc_freq_invariance_exit(void)
{
	if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
		return;

	kthread_destroy_worker(kworker_fie);
	kworker_fie = NULL;
}

#else
static inline void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
{
}

static inline void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
{
}

static inline void cppc_freq_invariance_init(void)
{
}

static inline void cppc_freq_invariance_exit(void)
{
}
#endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */

/* Callback used by dmi_walk() to find the max frequency in the DMI table */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
			      (dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real platforms should never report 0 MHz, but guard against a
	 * failed DMI walk and fall back to a safe non-zero value.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the two points:
 *  - (lowest perf, lowest freq)
 *  - (nominal perf, nominal freq)
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
					     unsigned int perf)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
					     unsigned int freq)
{
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
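
/*
 * Worked example of the affine mapping above (illustrative values only):
 * with lowest_perf = 100 at lowest_freq = 1000000 kHz and nominal_perf = 300
 * at nominal_freq = 2800000 kHz, the slope is (2800000 - 1000000) / (300 - 100)
 * = 9000 kHz per unit of perf and the offset is 100000 kHz, so
 * cppc_cpufreq_perf_to_khz() maps perf 200 to 100000 + 200 * 9000 = 1900000 kHz,
 * and cppc_cpufreq_khz_to_perf() maps 1900000 kHz back to perf 200 (modulo
 * integer truncation in div64_u64()).
 */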

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);

	/* Return early if the requested perf is already in place */
	if (desired_perf == cpu_data->perf_ctrls.desired_perf)
		return ret;

	cpu_data->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);

	return ret;
}

static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	u32 desired_perf;
	int ret;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	cpu_data->perf_ctrls.desired_perf = desired_perf;
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);

	if (ret) {
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);
		return 0;
	}

	return target_freq;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

/*
 * Qualcomm Falkor parts need a longer gap between frequency transition
 * requests than the value derived from the CPPC transition latency, so use a
 * fixed 10 ms transition delay on those CPUs and fall back to the firmware
 * provided latency (converted from ns to us) everywhere else.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			return 10000;
		}
	}
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)

static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);

/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity units. */
#define CPPC_EM_CAP_STEP	(20)
/* Increase the cost value by CPPC_EM_COST_STEP for every performance state. */
#define CPPC_EM_COST_STEP	(1)
/* Add a cost gap between efficiency classes corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
				/ CPPC_EM_CAP_STEP)

static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
	struct cppc_perf_caps *perf_caps;
	unsigned int min_cap, max_cap;
	struct cppc_cpudata *cpu_data;
	int cpu = policy->cpu;

	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
	if ((min_cap == 0) || (max_cap < min_cap))
		return 0;
	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}
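
/*
 * Illustrative sizing example (assumed values): with max_cap = 1024,
 * lowest_perf = 100 and highest_perf = 400, min_cap = 1024 * 100 / 400 = 256,
 * so the artificial EM gets 1 + 1024/20 - 256/20 = 1 + 51 - 12 = 40
 * performance levels, one every CPPC_EM_CAP_STEP (20) capacity units.
 */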

/*
 * Artificial cost: each efficiency class adds CPPC_EM_COST_GAP and each
 * performance step within a class adds CPPC_EM_COST_STEP, so less efficient
 * classes are always strictly more expensive than more efficient ones.
 */
static inline unsigned long compute_cost(int cpu, int step)
{
	return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
			step * CPPC_EM_COST_STEP;
}
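
/*
 * Hedged numeric example (assumes SCHED_CAPACITY_SCALE == 1024):
 * CPPC_EM_COST_GAP evaluates to 4 * 1024 * 1 / 20 = 204, so a CPU in
 * efficiency class 1 at step 5 gets cost = 204 * 1 + 5 * 1 = 209, while a
 * class-0 CPU at the same step only costs 5; the energy model therefore
 * treats the less efficient class as clearly more expensive.
 */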

static int cppc_get_cpu_power(struct device *cpu_dev,
			      unsigned long *power, unsigned long *KHz)
{
	unsigned long perf_step, perf_prev, perf, perf_check;
	unsigned int min_step, max_step, step, step_check;
	unsigned long prev_freq = *KHz;
	unsigned int min_cap, max_cap;
	struct cpufreq_policy *policy;

	struct cppc_perf_caps *perf_caps;
	struct cppc_cpudata *cpu_data;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
			  perf_caps->highest_perf);

	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	min_step = min_cap / CPPC_EM_CAP_STEP;
	max_step = max_cap / CPPC_EM_CAP_STEP;

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step = perf_prev / perf_step;

	if (step > max_step)
		return -EINVAL;

	if (min_step == max_step) {
		step = max_step;
		perf = perf_caps->highest_perf;
	} else if (step < min_step) {
		step = min_step;
		perf = perf_caps->lowest_perf;
	} else {
		step++;
		if (step == max_step)
			perf = perf_caps->highest_perf;
		else
			perf = step * perf_step;
	}

	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step_check = perf_check / perf_step;

	/*
	 * To avoid bad integer approximation, check that the new frequency
	 * value increased and that the new frequency converts back to the
	 * desired step value.
	 */
	while ((*KHz == prev_freq) || (step_check != step)) {
		perf++;
		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
		step_check = perf_check / perf_step;
	}

	/*
	 * With an artificial EM, only the cost value is used. Still, the power
	 * is populated such that 0 < power < EM_MAX_POWER, which gives the
	 * artificial performance states a sensible relative ordering.
	 */
	*power = compute_cost(cpu_dev->id, step);

	return 0;
}

static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
			     unsigned long *cost)
{
	unsigned long perf_step, perf_prev;
	struct cppc_perf_caps *perf_caps;
	struct cpufreq_policy *policy;
	struct cppc_cpudata *cpu_data;
	unsigned int max_cap;
	int step;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	step = perf_prev / perf_step;

	*cost = compute_cost(cpu_dev->id, step);

	return 0;
}

static int populate_efficiency_class(void)
{
	struct acpi_madt_generic_interrupt *gicc;
	DECLARE_BITMAP(used_classes, 256) = {};
	int class, cpu, index;

	for_each_possible_cpu(cpu) {
		gicc = acpi_cpu_get_madt_gicc(cpu);
		class = gicc->efficiency_class;
		bitmap_set(used_classes, class, 1);
	}

	if (bitmap_weight(used_classes, 256) <= 1) {
		pr_debug("Efficiency classes are all equal (=%d). "
			 "No EM registered", class);
		return -EINVAL;
	}

	/*
	 * Squeeze the efficiency class values onto [0:#used_classes-1].
	 * Values are per spec in [0:255].
	 */
	index = 0;
	for_each_set_bit(class, used_classes, 256) {
		for_each_possible_cpu(cpu) {
			gicc = acpi_cpu_get_madt_gicc(cpu);
			if (gicc->efficiency_class == class)
				per_cpu(efficiency_class, cpu) = index;
		}
		index++;
	}
	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;

	return 0;
}
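
/*
 * Example of the squeeze above (assumed MADT contents): if the GICC entries
 * report efficiency classes {0, 0, 3, 3} across four CPUs, the two used class
 * values 0 and 3 are remapped to indices 0 and 1, so per_cpu(efficiency_class)
 * ends up as {0, 0, 1, 1}.
 */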

static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data;
	struct em_data_callback em_cb =
		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);

	cpu_data = policy->driver_data;
	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
				    get_perf_level_count(policy), &em_cb,
				    cpu_data->shared_cpu_map, 0);
}

#else
static int populate_efficiency_class(void)
{
	return 0;
}
#endif /* CONFIG_ARM64 && CONFIG_ENERGY_MODEL */

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
	if (!cpu_data)
		goto out;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
		goto free_cpu;

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret) {
		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
		goto free_mask;
	}

	/* Convert the lowest and nominal freq from MHz to kHz */
	cpu_data->perf_caps.lowest_freq *= 1000;
	cpu_data->perf_caps.nominal_freq *= 1000;

	list_add(&cpu_data->node, &cpu_data_list);

	return cpu_data;

free_mask:
	free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
	kfree(cpu_data);
out:
	return NULL;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	list_del(&cpu_data->node);
	free_cpumask_var(cpu_data->shared_cpu_map);
	kfree(cpu_data);
	policy->driver_data = NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int cpu = policy->cpu;
	struct cppc_cpudata *cpu_data;
	struct cppc_perf_caps *caps;
	int ret;

	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
	if (!cpu_data) {
		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
		return -ENODEV;
	}
	caps = &cpu_data->perf_caps;
	policy->driver_data = cpu_data;

	/*
	 * Set min to the lowest nonlinear perf to avoid any efficiency
	 * penalty, as recommended by the ACPI spec for the lowest nonlinear
	 * performance level.
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
					       caps->nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf.
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
							    caps->nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
	policy->shared_type = cpu_data->shared_type;

	switch (policy->shared_type) {
	case CPUFREQ_SHARED_TYPE_HW:
	case CPUFREQ_SHARED_TYPE_NONE:
		/* Nothing to be done - we'll have a policy for each CPU */
		break;
	case CPUFREQ_SHARED_TYPE_ANY:
		/*
		 * All CPUs in the domain will share a policy and all cpufreq
		 * operations will use a single cppc_cpudata structure stored
		 * in policy->driver_data.
		 */
		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
		break;
	default:
		pr_debug("Unsupported CPU co-ord type: %d\n",
			 policy->shared_type);
		ret = -EFAULT;
		goto out;
	}

	policy->fast_switch_possible = cppc_allow_fast_switch();
	policy->dvfs_possible_from_any_cpu = true;

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (caps->highest_perf > caps->nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
	cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret) {
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->highest_perf, cpu, ret);
		goto out;
	}

	cppc_cpufreq_cpu_fie_init(policy);
	return 0;

out:
	cppc_cpufreq_put_cpu_data(policy);
	return ret;
}

static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	unsigned int cpu = policy->cpu;
	int ret;

	cppc_cpufreq_cpu_fie_exit(policy);

	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 caps->lowest_perf, cpu, ret);

	cppc_cpufreq_put_cpu_data(policy);
	return 0;
}

static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}
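
/*
 * get_delta() tolerates 32-bit feedback counters wrapping around. Assumed
 * example: with 32-bit counters, t0 = 0xfffffff0 and t1 = 0x10 after a wrap,
 * so t1 < t0 and both values fit in 32 bits; the (u32) subtraction yields
 * 0x10 - 0xfffffff0 = 0x20, the correct elapsed count.
 */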

static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t0,
				 struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf;

	reference_perf = fb_ctrs_t0->reference_perf;

	delta_reference = get_delta(fb_ctrs_t1->reference,
				    fb_ctrs_t0->reference);
	delta_delivered = get_delta(fb_ctrs_t1->delivered,
				    fb_ctrs_t0->delivered);

	/* Check to avoid divide-by-zero and invalid delivered_perf */
	if (!delta_reference || !delta_delivered)
		return cpu_data->perf_ctrls.desired_perf;

	return (reference_perf * delta_delivered) / delta_reference;
}
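
/*
 * Illustrative computation (made-up counter values): with reference_perf = 100,
 * a reference counter delta of 50000 and a delivered counter delta of 40000,
 * the delivered performance is 100 * 40000 / 50000 = 80, i.e. the CPU ran at
 * 80% of the reference performance over the sampling window.
 */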

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 delivered_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
	if (ret)
		return ret;

	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
					       &fb_ctrs_t1);

	return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}
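
/*
 * Note on ->get(): the reported rate is derived from the delivered/reference
 * counter ratio over a short (2 us) window, so it reflects what the hardware
 * actually ran at rather than the last requested frequency, and may therefore
 * differ from policy->cur.
 */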

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
						       caps->nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;

	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
	&freqdomain_cpus,
	NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= cppc_verify_policy,
	.target		= cppc_cpufreq_set_target,
	.get		= cppc_cpufreq_get_rate,
	.fast_switch	= cppc_cpufreq_fast_switch,
	.init		= cppc_cpufreq_cpu_init,
	.exit		= cppc_cpufreq_cpu_exit,
	.set_boost	= cppc_cpufreq_set_boost,
	.attr		= cppc_cpufreq_attr,
	.name		= "cppc_cpufreq",
};

/*
 * HiSilicon platforms do not support the delivered and reference performance
 * counters; they calculate the delivered performance through a platform
 * specific mechanism and reuse the desired performance register to store the
 * real performance computed by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cppc_cpudata *cpu_data = policy->driver_data;
	u64 desired_perf;
	int ret;

	cpufreq_cpu_put(policy);

	ret = cppc_get_desired_perf(cpu, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {

			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
	int ret;

	if ((acpi_disabled) || !acpi_cpc_valid())
		return -ENODEV;

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();
	populate_efficiency_class();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		cppc_freq_invariance_exit();

	return ret;
}

static inline void free_cpu_data(void)
{
	struct cppc_cpudata *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
		free_cpumask_var(iter->shared_cpu_map);
		list_del(&iter->node);
		kfree(iter);
	}
}

static void __exit cppc_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&cppc_cpufreq_driver);
	cppc_freq_invariance_exit();

	free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);