// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * CPU frequency scaling core: policy management, governor plumbing and the
 * cpufreq sysfs interface.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

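/* Macros to iterate over CPU policies */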
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

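/*
 * The "cpufreq driver" - the arch- or hardware-dependent low-level driver of
 * CPUFreq support. cpufreq_driver_lock protects the driver pointer and the
 * per-CPU cpufreq_cpu_data array below.
 */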
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

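/* Flag to suspend/resume CPUFreq governors */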
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

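/*
 * Two notifier lists: the "policy" list is involved in the validation process
 * for a new CPU frequency policy; the "transition" list is for kernel code
 * that needs to handle changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */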
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

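/**
 * cpufreq_generic_init - Initialize a generic cpufreq policy.
 * @policy: cpufreq policy to initialize.
 * @table: the driver's frequency table.
 * @transition_latency: the frequency transition latency, in nanoseconds.
 *
 * Generic helper for cpufreq drivers on SMP configurations where all CPUs
 * share the clock, voltage and frequency: it sets the frequency table and
 * transition latency and marks every CPU as affected by this policy.
 */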
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage and frequency.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

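/**
 * cpufreq_cpu_get - Return the policy for @cpu and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Look up the policy of @cpu under the cpufreq driver lock and, if one exists
 * and covers @cpu, take a reference on its kobject. The caller must release
 * the reference with cpufreq_cpu_put() when done with the policy.
 *
 * Return a valid policy on success or NULL on failure.
 */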
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

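/**
 * cpufreq_cpu_put - Decrement the kobject reference count of a policy.
 * @policy: cpufreq policy previously returned by cpufreq_cpu_get().
 */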
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

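/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu
 * and, if the policy is still active, acquire its rwsem for writing.
 *
 * Return the active policy or NULL on failure. A policy returned by this
 * function has to be released with the help of cpufreq_cpu_release().
 */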
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}

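/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 *
 * This function alters the system "loops_per_jiffy" for the clock speed
 * change. Note that loops_per_jiffy cannot be updated on SMP systems as each
 * CPU might be scaled differently, so this is compiled out on CONFIG_SMP.
 */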
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

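/**
 * cpufreq_notify_transition - Notify a frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition belongs to.
 * @freqs: details of the frequency update.
 * @state: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * Call the transition notifiers and adjust_jiffies(). Invoked twice for every
 * CPU frequency change that has external effects.
 */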
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

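/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier
 * registered at this point, as fast frequency switching is quite
 * fundamentally at odds with transition notifiers. Thus if successful, it
 * will make registration of transition notifiers fail going forward.
 */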
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

static unsigned int __resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (!policy->freq_table)
		return target_freq;

	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
}

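/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * target frequency, subject to policy (min/max) and driver limitations.
 */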
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds,
		 * it ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to
		 * be a reasonable amount of time after which we should
		 * reevaluate the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/*
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}

	/* Remove the extra space at the end */
	i--;

	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * offline
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *cpufreq_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
ATTRIBUTE_GROUPS(cpufreq);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->store)
		return -EIO;

	down_write(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->store(policy, buf, count);
	up_write(&policy->rwsem);

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_groups	= cpufreq_groups,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
				struct device *dev)
{
	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
	cpumask_clear_cpu(cpu, policy->real_cpus);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {

		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither
			 * "performance" nor "powersave", fall back to the
			 * initial policy value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/*
	 * The callers must ensure the policy is inactive by now, to avoid any
	 * races with show()/store() callbacks.
	 */
	if (unlikely(!policy_is_inactive(policy)))
		pr_warn("%s: Freeing active policy\n", __func__);

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
		 * notification, since CPUFREQ_CREATE_POLICY notification was
		 * sent after adding max_freq_req earlier.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
		down_write(&policy->rwsem);
	}

	if (!new_policy && cpufreq_driver->online) {
		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);

		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq core must be able
		 * to accept all calls to ->verify and ->setpolicy for this
		 * CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it is better to set it to a frequency
	 * which is specified in the freq-table. This also makes cpufreq
	 * stats inconsistent as cpufreq-stats would fail to register because
	 * the current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process
	 * badly, we go for the next freq which is >= policy->cur ('cur' must
	 * be set by now, otherwise we will end up giving undesired frequency
	 * changes).
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that the system will remain stable at
			 * "unknown" frequency for a longer duration. Hence,
			 * a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		/*
		 * Register with the energy model before the sched domains are
		 * rebuilt as part of a subsequent governor change, which
		 * should only be done once the energy model is properly
		 * initialized for the policy first.
		 *
		 * Also, this should be called before the policy is registered
		 * with the cooling framework.
		 */
		if (cpufreq_driver->register_em)
			cpufreq_driver->register_em(policy);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpumask_clear(policy->cpus);
	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
	return ret;
}

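/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used).
 */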
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}

static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
	int ret;

	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (!policy_is_inactive(policy)) {
		/* Nominate a new CPU if necessary. */
		if (cpu == policy->cpu)
			policy->cpu = cpumask_any(policy->cpus);

		/* Start the governor again for the active policy. */
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		return;
	}

	if (has_target())
		strncpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	else
		policy->last_policy = policy->policy;

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);

	__cpufreq_offline(cpu, policy);

	up_write(&policy->rwsem);
	return 0;
}

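/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */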
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (cpu_online(cpu))
		__cpufreq_offline(cpu, policy);

	remove_cpu_dev_symlink(policy, cpu, dev);

	if (!cpumask_empty(policy->real_cpus)) {
		up_write(&policy->rwsem);
		return;
	}

	/* We did light-weight exit earlier, do full tear down now */
	if (cpufreq_driver->offline)
		cpufreq_driver->exit(policy);

	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
}

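/**
 * cpufreq_out_of_sync - Fix up the actual vs. saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy() or scheduling handle_update().
 */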
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq core thinks it is %u kHz, hardware reports %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the
	 * check against policy->cur is pointless, so skip it in that case
	 * too.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		/*
		 * For some platforms, the frequency returned by hardware may
		 * be slightly different from what is provided in the
		 * frequency table, for example hardware may return 499 MHz
		 * instead of 500 MHz. In such cases it is better to avoid
		 * getting into unnecessary frequency updates.
		 */
		/* Frequencies are in kHz, so a 1 MHz tolerance is KHZ_PER_MHZ. */
		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
			return policy->cur;

		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

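/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */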
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

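/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (like: i2c, regulators, etc) they use
 * for changing frequency are suspended quickly after this point.
 */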
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
				cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

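/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that are suspended with cpufreq_suspend().
 */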
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}

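/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */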
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL
 * if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or
 * NULL if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

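/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a
 * list of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */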
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

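/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */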
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

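/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than
 * or equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be
 * invoked twice in parallel for the same policy and that it will never be
 * called in parallel with either ->target() or ->target_index() for the same
 * policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */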
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);

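/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 */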
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should
		 * have reverted back to initial frequency and so should we.
		 * Check here for intermediate_freq instead of
		 * get_intermediate, in case we haven't switched to
		 * intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same freq is requested again and so we can save
	 * on a few function calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target) {
		/*
		 * If the driver hasn't set up a single inefficient frequency,
		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
		 */
		if (!policy->efficiencies_available)
			relation &= ~CPUFREQ_RELATION_E;

		return cpufreq_driver->target(policy, target_freq, relation);
	}

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
2437
2438 int cpufreq_register_governor(struct cpufreq_governor *governor)
2439 {
2440 int err;
2441
2442 if (!governor)
2443 return -EINVAL;
2444
2445 if (cpufreq_disabled())
2446 return -ENODEV;
2447
2448 mutex_lock(&cpufreq_governor_mutex);
2449
2450 err = -EBUSY;
2451 if (!find_governor(governor->name)) {
2452 err = 0;
2453 list_add(&governor->governor_list, &cpufreq_governor_list);
2454 }
2455
2456 mutex_unlock(&cpufreq_governor_mutex);
2457 return err;
2458 }
2459 EXPORT_SYMBOL_GPL(cpufreq_register_governor);

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        if (!governor)
                return;

        if (cpufreq_disabled())
                return;

        /* clear last_governor for all inactive policies */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_inactive_policy(policy) {
                if (!strcmp(policy->last_governor, governor->name)) {
                        policy->governor = NULL;
                        strcpy(policy->last_governor, "\0");
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
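
/*
 * Usage sketch (hypothetical "example" governor, not part of this file):
 * a governor module typically registers itself at init time and unregisters
 * on exit:
 *
 *	static struct cpufreq_governor cpufreq_gov_example = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.flags	= CPUFREQ_GOV_DYNAMIC_SWITCHING,
 *		.init	= example_gov_init,
 *		.exit	= example_gov_exit,
 *		.start	= example_gov_start,
 *		.stop	= example_gov_stop,
 *		.limits	= example_gov_limits,
 *	};
 *
 *	static int __init example_gov_module_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_example);
 *	}
 *
 *	static void __exit example_gov_module_exit(void)
 *	{
 *		cpufreq_unregister_governor(&cpufreq_gov_example);
 *	}
 *	module_init(example_gov_module_init);
 *	module_exit(example_gov_module_exit);
 *
 * The example_gov_* callbacks are assumed to be provided by the module.
 */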

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct cpufreq_policy *cpu_policy;

        if (!policy)
                return -EINVAL;

        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy)
                return -EINVAL;

        memcpy(policy, cpu_policy, sizeof(*policy));

        cpufreq_cpu_put(cpu_policy);
        return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
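
/*
 * Usage sketch (illustrative, not part of the original file): callers pass
 * a caller-allocated policy structure that receives a copy of the policy
 * for the given CPU:
 *
 *	struct cpufreq_policy policy;
 *
 *	if (!cpufreq_get_policy(&policy, cpu))
 *		pr_info("CPU%u: %u - %u kHz\n", cpu, policy.min, policy.max);
 *
 * The copy is a snapshot; it is not updated when the live policy changes.
 */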

/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the new
 * frequency limits, update @policy with the verified limits and either invoke
 * the driver's ->setpolicy() callback (if present) or carry out a governor
 * update for @policy.  That is, run the current governor's ->limits()
 * callback (if @new_gov points to the same object as the one in @policy) or
 * replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_governor *new_gov,
                              unsigned int new_pol)
{
        struct cpufreq_policy_data new_data;
        struct cpufreq_governor *old_gov;
        int ret;

        memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
        new_data.freq_table = policy->freq_table;
        new_data.cpu = policy->cpu;

        /*
         * The PM QoS framework collects all the requests from users and
         * provides us with the final aggregated values here.
         */
        new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
        new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
                 new_data.cpu, new_data.min, new_data.max);

        /*
         * Verify that the CPU speed can be set within these limits and make
         * sure that min <= max.
         */
        ret = cpufreq_driver->verify(&new_data);
        if (ret)
                return ret;

        /*
         * Resolve the policy min/max to available frequencies, so that the
         * resulting limits neither overshoot the requested maximum nor
         * undershoot the requested minimum.
         */
        policy->min = new_data.min;
        policy->max = new_data.max;
        policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
        policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
        trace_cpu_frequency_limits(policy);

        policy->cached_target_freq = UINT_MAX;

        pr_debug("new min and max freqs are %u - %u kHz\n",
                 policy->min, policy->max);

        if (cpufreq_driver->setpolicy) {
                policy->policy = new_pol;
                pr_debug("setting range\n");
                return cpufreq_driver->setpolicy(policy);
        }

        if (new_gov == policy->governor) {
                pr_debug("governor limits update\n");
                cpufreq_governor_limits(policy);
                return 0;
        }

        pr_debug("governor switch\n");

        /* save old, working values */
        old_gov = policy->governor;
        /* end old governor */
        if (old_gov) {
                cpufreq_stop_governor(policy);
                cpufreq_exit_governor(policy);
        }

        /* start new governor */
        policy->governor = new_gov;
        ret = cpufreq_init_governor(policy);
        if (!ret) {
                ret = cpufreq_start_governor(policy);
                if (!ret) {
                        pr_debug("governor change\n");
                        sched_cpufreq_governor_change(policy, old_gov);
                        return 0;
                }
                cpufreq_exit_governor(policy);
        }

        /* new governor failed, so re-start old one */
        pr_debug("starting governor %s failed\n", policy->governor->name);
        if (old_gov) {
                policy->governor = old_gov;
                if (cpufreq_init_governor(policy))
                        policy->governor = NULL;
                else
                        cpufreq_start_governor(policy);
        }

        return ret;
}

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers
 * the evaluation of policy notifiers and the cpufreq driver's ->verify()
 * callback for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

        if (!policy)
                return;

        /*
         * BIOS might change freq behind our back
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get && has_target() &&
            (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
                goto unlock;

        refresh_frequency_limits(policy);

unlock:
        cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
        if (cpufreq_driver->update_limits)
                cpufreq_driver->update_limits(cpu);
        else
                cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
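
/*
 * Usage sketch (illustrative, not part of the original file): platform code
 * that learns about new frequency limits, for example via a firmware
 * notification, can ask the core to re-evaluate them:
 *
 *	static void example_limits_notify(unsigned int cpu)
 *	{
 *		cpufreq_update_limits(cpu);
 *	}
 *
 * example_limits_notify() is a hypothetical handler. Drivers that can
 * refresh limits cheaply set ->update_limits; everyone else gets the full
 * cpufreq_update_policy() path above.
 */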

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
        int ret;

        if (!policy->freq_table)
                return -ENXIO;

        ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
        if (ret) {
                pr_err("%s: Policy frequency update failed\n", __func__);
                return ret;
        }

        ret = freq_qos_update_request(policy->max_freq_req, policy->max);
        if (ret < 0)
                return ret;

        return 0;
}

int cpufreq_boost_trigger_state(int state)
{
        struct cpufreq_policy *policy;
        unsigned long flags;
        int ret = 0;

        if (cpufreq_driver->boost_enabled == state)
                return 0;

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver->boost_enabled = state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpus_read_lock();
        for_each_active_policy(policy) {
                ret = cpufreq_driver->set_boost(policy, state);
                if (ret)
                        goto err_reset_state;
        }
        cpus_read_unlock();

        return 0;

err_reset_state:
        cpus_read_unlock();

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver->boost_enabled = !state;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        pr_err("%s: Cannot %s BOOST\n",
               __func__, state ? "enable" : "disable");

        return ret;
}

static bool cpufreq_boost_supported(void)
{
        return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
        int ret;

        ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
        if (ret)
                pr_err("%s: cannot register global BOOST sysfs file\n",
                       __func__);

        return ret;
}

static void remove_boost_sysfs_file(void)
{
        if (cpufreq_boost_supported())
                sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
        if (!cpufreq_driver)
                return -EINVAL;

        if (cpufreq_boost_supported())
                return 0;

        cpufreq_driver->set_boost = cpufreq_boost_set_sw;

        /* This will get removed on driver unregister */
        return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
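
/*
 * Usage sketch (illustrative, not part of the original file): a driver whose
 * frequency table contains boost entries can opt in to the software boost
 * handling from its ->init() callback:
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		if (example_has_boost_freqs)
 *			return cpufreq_enable_boost_support();
 *		return 0;
 *	}
 *
 * example_has_boost_freqs stands in for whatever platform-specific check the
 * driver performs; the helper then installs cpufreq_boost_set_sw() as the
 * ->set_boost callback and creates the global "boost" sysfs file.
 */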

int cpufreq_boost_enabled(void)
{
        return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
        cpufreq_online(cpu);

        return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
        cpufreq_offline(cpu);

        return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
        unsigned long flags;
        int ret;

        if (cpufreq_disabled())
                return -ENODEV;

        /*
         * The cpufreq core depends heavily on the availability of device
         * structures, so make sure they are available before proceeding
         * further.
         */
        if (!get_cpu_device(0))
                return -EPROBE_DEFER;

        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
                    driver_data->target) ||
             (driver_data->setpolicy && (driver_data->target_index ||
                    driver_data->target)) ||
             (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
             (!driver_data->online != !driver_data->offline))
                return -EINVAL;

        pr_debug("trying to register driver %s\n", driver_data->name);

        /* Protect against concurrent CPU online/offline. */
        cpus_read_lock();

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
                ret = -EEXIST;
                goto out;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        /*
         * Mark support for the scheduler's frequency invariance engine for
         * drivers that implement target(), target_index() or fast_switch().
         */
        if (!cpufreq_driver->setpolicy) {
                static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
                pr_debug("supports frequency invariance\n");
        }

        if (driver_data->setpolicy)
                driver_data->flags |= CPUFREQ_CONST_LOOPS;

        if (cpufreq_boost_supported()) {
                ret = create_boost_sysfs_file();
                if (ret)
                        goto err_null_driver;
        }

        ret = subsys_interface_register(&cpufreq_interface);
        if (ret)
                goto err_boost_unreg;

        if (unlikely(list_empty(&cpufreq_policy_list))) {
                /* if all ->init() calls failed, unregister */
                ret = -ENODEV;
                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
                         driver_data->name);
                goto err_if_unreg;
        }

        ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                                   "cpufreq:online",
                                                   cpuhp_cpufreq_online,
                                                   cpuhp_cpufreq_offline);
        if (ret < 0)
                goto err_if_unreg;
        hp_online = ret;
        ret = 0;

        pr_debug("driver %s up and running\n", driver_data->name);
        goto out;

err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
        remove_boost_sysfs_file();
err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver.  Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
        unsigned long flags;

        if (!cpufreq_driver || (driver != cpufreq_driver))
                return -EINVAL;

        pr_debug("unregistering driver %s\n", driver->name);

        /* Protect against concurrent CPU online/offline. */
        cpus_read_lock();
        subsys_interface_unregister(&cpufreq_interface);
        remove_boost_sysfs_file();
        static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
        cpuhp_remove_state_nocalls_cpuslocked(hp_online);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpufreq_driver = NULL;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        cpus_read_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
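
/*
 * Usage sketch (hypothetical "example" driver, not part of this file): a
 * typical frequency-table based driver pairs the two calls above in its
 * module init/exit:
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= example_cpu_init,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init example_cpufreq_init(void)
 *	{
 *		return cpufreq_register_driver(&example_cpufreq_driver);
 *	}
 *
 *	static void __exit example_cpufreq_exit(void)
 *	{
 *		cpufreq_unregister_driver(&example_cpufreq_driver);
 *	}
 *	module_init(example_cpufreq_init);
 *	module_exit(example_cpufreq_exit);
 *
 * example_target_index and example_cpu_init are assumed to be provided by
 * the driver; the cpufreq_generic_* helpers are existing cpufreq helpers.
 */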

static int __init cpufreq_core_init(void)
{
        struct cpufreq_governor *gov = cpufreq_default_governor();

        if (cpufreq_disabled())
                return -ENODEV;

        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);

        if (!strlen(default_governor))
                strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

        return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);
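
/*
 * Note: because this code is built in, the module parameters above are set
 * on the kernel command line with the "cpufreq." prefix, e.g.:
 *
 *	cpufreq.off=1
 *	cpufreq.default_governor=schedutil
 *
 * "schedutil" here is just an example governor name.
 */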