0001
0002
0003
0004
0005
0006
0007
0008 #ifndef _LINUX_CPUFREQ_H
0009 #define _LINUX_CPUFREQ_H
0010
0011 #include <linux/clk.h>
0012 #include <linux/cpu.h>
0013 #include <linux/cpumask.h>
0014 #include <linux/completion.h>
0015 #include <linux/kobject.h>
0016 #include <linux/notifier.h>
0017 #include <linux/of.h>
0018 #include <linux/of_device.h>
0019 #include <linux/pm_opp.h>
0020 #include <linux/pm_qos.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/sysfs.h>
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
/* Transition latency value used when it cannot be determined. */
#define CPUFREQ_ETERNAL (-1)
/* Maximum length of a governor or driver name, including '\0'. */
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
0038
0039 struct cpufreq_governor;
0040
/* Sort order of a driver-provided frequency table, as detected by the core. */
enum cpufreq_table_sorting {
	CPUFREQ_TABLE_UNSORTED,
	CPUFREQ_TABLE_SORTED_ASCENDING,
	CPUFREQ_TABLE_SORTED_DESCENDING
};
0046
/* Hardware frequency limits and switching cost for a CPU. */
struct cpufreq_cpuinfo {
	unsigned int max_freq;	/* in kHz */
	unsigned int min_freq;	/* in kHz */

	/* in 10^(-9) s = nanoseconds */
	unsigned int transition_latency;
};
0054
struct cpufreq_policy {
	/* CPUs sharing clock, require sw coordination */
	cpumask_var_t cpus;		/* Online CPUs only */
	cpumask_var_t related_cpus;	/* Online + Offline CPUs */
	cpumask_var_t real_cpus;	/* Related and present */

	unsigned int shared_type;	/* ACPI: ANY or ALL affected CPUs
					   should set cpufreq */
	/* CPU managing this policy */
	unsigned int cpu;

	struct clk *clk;
	struct cpufreq_cpuinfo cpuinfo;	/* hardware limits, see above */

	unsigned int min;		/* in kHz */
	unsigned int max;		/* in kHz */
	unsigned int cur;		/* in kHz, only needed if cpufreq
					 * governors are used */
	unsigned int suspend_freq;	/* freq to set during suspend */

	unsigned int policy;		/* CPUFREQ_POLICY_* value, see below */
	unsigned int last_policy;	/* policy before unplug */
	struct cpufreq_governor *governor; /* see below */
	void *governor_data;
	char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

	struct work_struct update;	/* if update_policy() needs to be
					 * called, but you're in IRQ context */

	/* Frequency QoS constraints and the requests backing min/max. */
	struct freq_constraints constraints;
	struct freq_qos_request *min_freq_req;
	struct freq_qos_request *max_freq_req;

	struct cpufreq_frequency_table *freq_table;
	enum cpufreq_table_sorting freq_table_sorted;

	struct list_head policy_list;
	struct kobject kobj;
	struct completion kobj_unregister;

	/*
	 * The rules for this semaphore:
	 * - Any routine that wants to read from the policy structure will
	 *   do a down_read on this semaphore.
	 * - Any routine that will write to the policy structure and/or may
	 *   take away the policy altogether (eg. CPU hotplug), will hold
	 *   this lock in write mode before doing so.
	 */
	struct rw_semaphore rwsem;

	/*
	 * Fast switch flags:
	 * - fast_switch_possible is to be set by the driver if it can
	 *   change the frequency from any CPU sharing the policy.
	 * - fast_switch_enabled is to be set by governors that support fast
	 *   frequency switching with the help of
	 *   cpufreq_enable_fast_switch().
	 */
	bool fast_switch_possible;
	bool fast_switch_enabled;

	/*
	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
	 * governor.
	 */
	bool strict_target;

	/*
	 * Set if inefficient frequencies were found in the frequency table.
	 * This indicates if the relation flag CPUFREQ_RELATION_E can be
	 * honored.
	 */
	bool efficiencies_available;

	/*
	 * Preferred average time interval between consecutive invocations of
	 * the driver to set the frequency for this policy.  To be set by the
	 * scaling driver (0, which is the default, means no preference).
	 */
	unsigned int transition_delay_us;

	/*
	 * Remote DVFS flag: should be set if CPUs can do DVFS on behalf of
	 * other CPUs from different cpufreq policies.
	 */
	bool dvfs_possible_from_any_cpu;

	/* Cached frequency lookup from cpufreq_driver_resolve_freq(). */
	unsigned int cached_target_freq;
	unsigned int cached_resolved_idx;

	/* Synchronization for frequency transitions */
	bool transition_ongoing;	/* Tracks transition status */
	spinlock_t transition_lock;
	wait_queue_head_t transition_wait;
	struct task_struct *transition_task; /* Task which is doing the transition */

	/* cpufreq-stats */
	struct cpufreq_stats *stats;

	/* For cpufreq driver's internal use */
	void *driver_data;

	/* Pointer to the cooling device if used for thermal mitigation */
	struct thermal_cooling_device *cdev;

	/* Notifiers for the min/max frequency QoS constraints above. */
	struct notifier_block nb_min;
	struct notifier_block nb_max;
};
0166
0167
0168
0169
0170
0171
0172
/*
 * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
 * callback for sanitization.  To be called only from the driver's ->verify()
 * callback.
 */
struct cpufreq_policy_data {
	struct cpufreq_cpuinfo cpuinfo;
	struct cpufreq_frequency_table *freq_table;
	unsigned int cpu;
	unsigned int min;	/* in kHz */
	unsigned int max;	/* in kHz */
};
0180
/* Payload of a frequency-transition notification (PRE/POSTCHANGE). */
struct cpufreq_freqs {
	struct cpufreq_policy *policy;
	unsigned int old;	/* previous frequency, in kHz */
	unsigned int new;	/* new frequency, in kHz */
	u8 flags;		/* flags of cpufreq_driver, see below. */
};
0187
0188
/* Only for ACPI: coordination type for CPUs sharing a policy (_PSD). */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW   (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL  (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY  (3) /* Freq can be set from any dependent CPU */
0193
0194 #ifdef CONFIG_CPU_FREQ
0195 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
0196 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
0197 void cpufreq_cpu_put(struct cpufreq_policy *policy);
0198 #else
/* !CONFIG_CPU_FREQ: no policies exist, so lookups always fail. */
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
0208 #endif
0209
/* A policy is inactive when none of its CPUs are online. */
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}
0214
/* True when more than one online CPU shares this policy's clock. */
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}
0219
0220 #ifdef CONFIG_CPU_FREQ
0221 unsigned int cpufreq_get(unsigned int cpu);
0222 unsigned int cpufreq_quick_get(unsigned int cpu);
0223 unsigned int cpufreq_quick_get_max(unsigned int cpu);
0224 unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
0225 void disable_cpufreq(void);
0226
0227 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
0228
0229 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
0230 void cpufreq_cpu_release(struct cpufreq_policy *policy);
0231 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
0232 void refresh_frequency_limits(struct cpufreq_policy *policy);
0233 void cpufreq_update_policy(unsigned int cpu);
0234 void cpufreq_update_limits(unsigned int cpu);
0235 bool have_governor_per_policy(void);
0236 bool cpufreq_supports_freq_invariance(void);
0237 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
0238 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
0239 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
0240 #else
/* !CONFIG_CPU_FREQ: report 0 kHz / no capability for all queries. */
static inline unsigned int cpufreq_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
	return false;
}
static inline void disable_cpufreq(void) { }
0262 #endif
0263
0264 #ifdef CONFIG_CPU_FREQ_STAT
0265 void cpufreq_stats_create_table(struct cpufreq_policy *policy);
0266 void cpufreq_stats_free_table(struct cpufreq_policy *policy);
0267 void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
0268 unsigned int new_freq);
0269 #else
/* !CONFIG_CPU_FREQ_STAT: statistics collection compiles away. */
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
						   unsigned int new_freq) { }
0274 #endif
0275
0276
0277
0278
0279
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
/* relation flags */
#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */

#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
0289
/* Per-policy sysfs attribute: show/store operate on a cpufreq policy. */
struct freq_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_policy *, char *);
	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
0295
/* Helpers declaring per-policy (freq_attr) and global (kobj_attribute)
 * sysfs attributes; they expect show_<name>/store_<name> callbacks. */
#define cpufreq_freq_attr_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm) \
static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name) \
static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
0319
0320
struct cpufreq_driver {
	char name[CPUFREQ_NAME_LEN];
	u16 flags;		/* CPUFREQ_* driver flags, see below */
	void *driver_data;

	/* needed by all drivers */
	int (*init)(struct cpufreq_policy *policy);
	int (*verify)(struct cpufreq_policy_data *policy);

	/* define one out of two */
	int (*setpolicy)(struct cpufreq_policy *policy);

	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);	/* Deprecated */
	int (*target_index)(struct cpufreq_policy *policy,
			    unsigned int index);
	unsigned int (*fast_switch)(struct cpufreq_policy *policy,
				    unsigned int target_freq);

	/*
	 * ->fast_switch() replacement for drivers that use an internal
	 * representation of performance levels and can pass hints other
	 * than the target performance level to the hardware.
	 */
	void (*adjust_perf)(unsigned int cpu,
			    unsigned long min_perf,
			    unsigned long target_perf,
			    unsigned long capacity);

	/*
	 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
	 * unset.
	 *
	 * get_intermediate() should return a stable intermediate frequency
	 * platform wants to switch to and target_intermediate() should set
	 * CPU to that frequency, before jumping to the frequency
	 * corresponding to 'index'. The core takes care of sending
	 * notifications; the driver doesn't have to handle them in
	 * target_intermediate() or target_index().
	 */
	unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
					 unsigned int index);
	int (*target_intermediate)(struct cpufreq_policy *policy,
				   unsigned int index);

	/* should be defined, if possible, returns current frequency in kHz */
	unsigned int (*get)(unsigned int cpu);

	/* Called to update policy limits on firmware notifications. */
	void (*update_limits)(unsigned int cpu);

	/* optional */
	int (*bios_limit)(int cpu, unsigned int *limit);

	int (*online)(struct cpufreq_policy *policy);
	int (*offline)(struct cpufreq_policy *policy);
	int (*exit)(struct cpufreq_policy *policy);
	int (*suspend)(struct cpufreq_policy *policy);
	int (*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void (*ready)(struct cpufreq_policy *policy);

	/* NULL-terminated list of extra per-policy sysfs attributes */
	struct freq_attr **attr;

	/* platform specific boost support code */
	bool boost_enabled;
	int (*set_boost)(struct cpufreq_policy *policy, int state);

	/*
	 * Set by drivers that want to register with the energy model after
	 * the policy is properly initialized, but before the governor is
	 * started.
	 */
	void (*register_em)(struct cpufreq_policy *policy);
};
0400
0401
0402
0403
0404
0405
0406
0407
0408
/* Flags for struct cpufreq_driver::flags. */

/*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency, so the core and governors should also invoke the
 * driver if the target frequency does not change, but the policy min or max
 * may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS BIT(1)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV BIT(2)

/*
 * Set by drivers which want the core to maintain a separate governor instance
 * per policy ("governor per policy" mechanism).
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3)

/*
 * Set by drivers which do POSTCHANGE notifications from outside of their
 * ->target() routine, so that the core can handle them specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION BIT(4)

/*
 * Set by drivers which want the cpufreq core to check if the CPU is running
 * at a frequency present in the freq-table exposed by the driver. For these
 * drivers, if the CPU is found running at an out-of-table frequency, the core
 * tries to set it to a frequency from the table.
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching"
 * flag set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
0449
0450 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
0451 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
0452
0453 bool cpufreq_driver_test_flags(u16 flags);
0454 const char *cpufreq_get_current_driver(void);
0455 void *cpufreq_get_driver_data(void);
0456
/*
 * True when CPU thermal control is built in and the driver asked the core to
 * register it as a cooling device (CPUFREQ_IS_COOLING_DEV).
 */
static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
{
	return IS_ENABLED(CONFIG_CPU_THERMAL) &&
		(drv->flags & CPUFREQ_IS_COOLING_DEV);
}
0462
0463 static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
0464 unsigned int min,
0465 unsigned int max)
0466 {
0467 if (policy->min < min)
0468 policy->min = min;
0469 if (policy->max < min)
0470 policy->max = min;
0471 if (policy->min > max)
0472 policy->min = max;
0473 if (policy->max > max)
0474 policy->max = max;
0475 if (policy->min > policy->max)
0476 policy->min = policy->max;
0477 return;
0478 }
0479
/* Clamp the policy's min/max into the hardware limits from cpuinfo. */
static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}
0486
0487 #ifdef CONFIG_CPU_FREQ
0488 void cpufreq_suspend(void);
0489 void cpufreq_resume(void);
0490 int cpufreq_generic_suspend(struct cpufreq_policy *policy);
0491 #else
/* !CONFIG_CPU_FREQ: nothing to quiesce across system suspend/resume. */
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
0494 #endif
0495
0496
0497
0498
0499
/* Notifier chain selectors for cpufreq_register_notifier(). */
#define CPUFREQ_TRANSITION_NOTIFIER (0)
#define CPUFREQ_POLICY_NOTIFIER (1)

/* Transition notifier events */
#define CPUFREQ_PRECHANGE (0)
#define CPUFREQ_POSTCHANGE (1)

/* Policy notifier events */
#define CPUFREQ_CREATE_POLICY (0)
#define CPUFREQ_REMOVE_POLICY (1)
0510
0511 #ifdef CONFIG_CPU_FREQ
0512 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
0513 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
0514
0515 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
0516 struct cpufreq_freqs *freqs);
0517 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
0518 struct cpufreq_freqs *freqs, int transition_failed);
0519
0520 #else
/* !CONFIG_CPU_FREQ: notifier (un)registration trivially succeeds. */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
					    unsigned int list)
{
	return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
					      unsigned int list)
{
	return 0;
}
0531 #endif
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
/*
 * cpufreq_scale - "old * mult / div" calculation for large values
 * (32-bit-arch safe): on 32-bit the product is formed in 64 bits and
 * divided with do_div(); on 64-bit plain arithmetic suffices.
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
					  u_int mult)
{
#if BITS_PER_LONG == 32
	u64 result = ((u64) old) * ((u64) mult);
	do_div(result, div);
	return (unsigned long) result;

#elif BITS_PER_LONG == 64
	unsigned long result = old * ((u64) mult);
	result /= div;
	return result;
#endif
}
0557
0558
0559
0560
0561
/*
 * Values of struct cpufreq_policy::policy, used by setpolicy-type drivers.
 */
#define CPUFREQ_POLICY_UNKNOWN (0)
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)

/*
 * The polling frequency depends on the capability of the processor. Default
 * polling frequency is 1000 times the transition latency of the processor.
 */
#define LATENCY_MULTIPLIER (1000)
0578
/* Governor callbacks, invoked by the core over a policy's lifetime. */
struct cpufreq_governor {
	char name[CPUFREQ_NAME_LEN];
	int (*init)(struct cpufreq_policy *policy);
	void (*exit)(struct cpufreq_policy *policy);
	int (*start)(struct cpufreq_policy *policy);
	void (*stop)(struct cpufreq_policy *policy);
	void (*limits)(struct cpufreq_policy *policy);
	/* show/store hooks for the governor's setspeed sysfs file */
	ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
				  char *buf);
	int (*store_setspeed) (struct cpufreq_policy *policy,
			       unsigned int freq);
	struct list_head governor_list;
	struct module *owner;
	u8 flags;	/* CPUFREQ_GOV_* flags, see below */
};
0594
0595
0596
0597
/* Governor flags (struct cpufreq_governor::flags). */

/* For governors which change frequency dynamically by themselves */
#define CPUFREQ_GOV_DYNAMIC_SWITCHING BIT(0)

/* For governors wanting the target frequency to be set exactly */
#define CPUFREQ_GOV_STRICT_TARGET BIT(1)
0602
0603
0604
0605 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
0606 unsigned int target_freq);
0607 void cpufreq_driver_adjust_perf(unsigned int cpu,
0608 unsigned long min_perf,
0609 unsigned long target_perf,
0610 unsigned long capacity);
0611 bool cpufreq_driver_has_adjust_perf(void);
0612 int cpufreq_driver_target(struct cpufreq_policy *policy,
0613 unsigned int target_freq,
0614 unsigned int relation);
0615 int __cpufreq_driver_target(struct cpufreq_policy *policy,
0616 unsigned int target_freq,
0617 unsigned int relation);
0618 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
0619 unsigned int target_freq);
0620 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
0621 int cpufreq_register_governor(struct cpufreq_governor *governor);
0622 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
0623 int cpufreq_start_governor(struct cpufreq_policy *policy);
0624 void cpufreq_stop_governor(struct cpufreq_policy *policy);
0625
/* Boilerplate for registering/unregistering a governor at module init/exit. */
#define cpufreq_governor_init(__governor) \
static int __init __governor##_init(void) \
{ \
return cpufreq_register_governor(&__governor); \
} \
core_initcall(__governor##_init)

#define cpufreq_governor_exit(__governor) \
static void __exit __governor##_exit(void) \
{ \
return cpufreq_unregister_governor(&__governor); \
} \
module_exit(__governor##_exit)
0639
0640 struct cpufreq_governor *cpufreq_default_governor(void);
0641 struct cpufreq_governor *cpufreq_fallback_governor(void);
0642
/*
 * Pull the current frequency back inside the policy's [min, max] range,
 * preferring an efficient frequency (the *_E relations) when available.
 */
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_HE);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min,
					CPUFREQ_RELATION_LE);
}
0652
0653
/* Governor sysfs attribute set, shared by policies using the same tunables. */
struct gov_attr_set {
	struct kobject kobj;
	struct list_head policy_list;
	struct mutex update_lock;
	int usage_count;	/* number of policies attached to this set */
};
0660
0661
0662 extern const struct sysfs_ops governor_sysfs_ops;
0663
/* Map an embedded kobject back to its containing gov_attr_set. */
static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
	return container_of(kobj, struct gov_attr_set, kobj);
}
0668
0669 void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
0670 void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
0671 unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
0672
0673
/* Governor sysfs attribute: show/store operate on a gov_attr_set. */
struct governor_attr {
	struct attribute attr;
	ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
			 size_t count);
};
0680
0681
0682
0683
0684
0685
/* Special values of cpufreq_frequency_table.frequency */
#define CPUFREQ_ENTRY_INVALID ~0u
#define CPUFREQ_TABLE_END ~1u

/* Special values of cpufreq_frequency_table.flags */
#define CPUFREQ_BOOST_FREQ (1 << 0)
#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
0691
struct cpufreq_frequency_table {
	unsigned int flags;		/* CPUFREQ_BOOST_FREQ etc., see above */
	unsigned int driver_data;	/* driver specific data, not used by core */
	unsigned int frequency;		/* kHz - doesn't need to be in
					 * ascending order */
};
0698
0699 #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
0700 int dev_pm_opp_init_cpufreq_table(struct device *dev,
0701 struct cpufreq_frequency_table **table);
0702 void dev_pm_opp_free_cpufreq_table(struct device *dev,
0703 struct cpufreq_frequency_table **table);
0704 #else
/* Without CPU_FREQ + PM_OPP, no frequency table can be built from OPPs. */
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
						struct cpufreq_frequency_table
						**table)
{
	return -EINVAL;
}

static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
						 struct cpufreq_frequency_table
						 **table)
{
}
0717 #endif
0718
0719
0720
0721
0722
0723
0724
/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_entry(pos, table) \
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 * with index
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed.
 */
#define cpufreq_for_each_entry_idx(pos, table, idx) \
for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
pos++, idx++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table,
 * excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 */
#define cpufreq_for_each_valid_entry(pos, table) \
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) \
if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
continue; \
else

/*
 * cpufreq_for_each_valid_entry_idx - iterate with index over a
 * cpufreq_frequency_table, excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed.
 */
#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
cpufreq_for_each_entry_idx(pos, table, idx) \
if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
continue; \
else

/*
 * cpufreq_for_each_efficient_entry_idx - iterate with index over a
 * cpufreq_frequency_table, excluding CPUFREQ_ENTRY_INVALID frequencies and
 * CPUFREQ_INEFFICIENT_FREQ frequencies (the latter only if @efficiencies
 * is true).
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed.
 * @efficiencies: set to true to exclude inefficient frequencies.
 */
#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
cpufreq_for_each_valid_entry_idx(pos, table, idx) \
if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
continue; \
else
0782
0783
0784 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
0785 struct cpufreq_frequency_table *table);
0786
0787 int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
0788 struct cpufreq_frequency_table *table);
0789 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
0790
0791 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
0792 unsigned int target_freq,
0793 unsigned int relation);
0794 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
0795 unsigned int freq);
0796
0797 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
0798
0799 #ifdef CONFIG_CPU_FREQ
0800 int cpufreq_boost_trigger_state(int state);
0801 int cpufreq_boost_enabled(void);
0802 int cpufreq_enable_boost_support(void);
0803 bool policy_has_boost_freq(struct cpufreq_policy *policy);
0804
0805
0806 static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
0807 unsigned int target_freq,
0808 bool efficiencies)
0809 {
0810 struct cpufreq_frequency_table *table = policy->freq_table;
0811 struct cpufreq_frequency_table *pos;
0812 unsigned int freq;
0813 int idx, best = -1;
0814
0815 cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
0816 freq = pos->frequency;
0817
0818 if (freq >= target_freq)
0819 return idx;
0820
0821 best = idx;
0822 }
0823
0824 return best;
0825 }
0826
0827
/* Find lowest freq at or above target in a table sorted in descending order. */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		/* Still above target: remember as candidate and keep going. */
		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* First entry below target: the previous candidate wins. */
		return best;
	}

	return best;
}
0857
0858
0859 static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
0860 unsigned int target_freq,
0861 bool efficiencies)
0862 {
0863 target_freq = clamp_val(target_freq, policy->min, policy->max);
0864
0865 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
0866 return cpufreq_table_find_index_al(policy, target_freq,
0867 efficiencies);
0868 else
0869 return cpufreq_table_find_index_dl(policy, target_freq,
0870 efficiencies);
0871 }
0872
0873
/* Find highest freq at or below target in a table sorted in ascending order. */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		/* Still below target: remember as candidate and keep going. */
		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* First entry above target: the previous candidate wins. */
		return best;
	}

	return best;
}
0903
0904
0905 static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
0906 unsigned int target_freq,
0907 bool efficiencies)
0908 {
0909 struct cpufreq_frequency_table *table = policy->freq_table;
0910 struct cpufreq_frequency_table *pos;
0911 unsigned int freq;
0912 int idx, best = -1;
0913
0914 cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
0915 freq = pos->frequency;
0916
0917 if (freq <= target_freq)
0918 return idx;
0919
0920 best = idx;
0921 }
0922
0923 return best;
0924 }
0925
0926
0927 static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
0928 unsigned int target_freq,
0929 bool efficiencies)
0930 {
0931 target_freq = clamp_val(target_freq, policy->min, policy->max);
0932
0933 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
0934 return cpufreq_table_find_index_ah(policy, target_freq,
0935 efficiencies);
0936 else
0937 return cpufreq_table_find_index_dh(policy, target_freq,
0938 efficiencies);
0939 }
0940
0941
/* Find closest freq to target in a table sorted in ascending order. */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		/* Still below target: remember as candidate and keep going. */
		if (freq < target_freq) {
			best = idx;
			continue;
		}

		/* No freq found below target_freq */
		if (best == -1)
			return idx;

		/* Choose the closer of the two neighbours around target. */
		if (target_freq - table[best].frequency > freq - target_freq)
			return idx;

		return best;
	}

	return best;
}
0975
0976
/* Find closest freq to target in a table sorted in descending order. */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
					      unsigned int target_freq,
					      bool efficiencies)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	int idx, best = -1;

	cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
		freq = pos->frequency;

		if (freq == target_freq)
			return idx;

		/* Still above target: remember as candidate and keep going. */
		if (freq > target_freq) {
			best = idx;
			continue;
		}

		/* No freq found above target_freq */
		if (best == -1)
			return idx;

		/* Choose the closer of the two neighbours around target. */
		if (table[best].frequency - target_freq > target_freq - freq)
			return idx;

		return best;
	}

	return best;
}
1010
1011
1012 static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
1013 unsigned int target_freq,
1014 bool efficiencies)
1015 {
1016 target_freq = clamp_val(target_freq, policy->min, policy->max);
1017
1018 if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
1019 return cpufreq_table_find_index_ac(policy, target_freq,
1020 efficiencies);
1021 else
1022 return cpufreq_table_find_index_dc(policy, target_freq,
1023 efficiencies);
1024 }
1025
/*
 * Resolve @target_freq to a frequency-table index honouring @relation.
 * The CPUFREQ_RELATION_E hint is honoured only if the table actually has
 * inefficient entries; if an efficiency-restricted search finds nothing,
 * the search is retried including inefficient frequencies.
 */
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
						 unsigned int target_freq,
						 unsigned int relation)
{
	bool efficiencies = policy->efficiencies_available &&
			    (relation & CPUFREQ_RELATION_E);
	int idx;

	/* Strip the efficiency hint before dispatching on the base relation. */
	relation &= ~CPUFREQ_RELATION_E;

	if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
		return cpufreq_table_index_unsorted(policy, target_freq,
						    relation);
retry:
	switch (relation) {
	case CPUFREQ_RELATION_L:
		idx = cpufreq_table_find_index_l(policy, target_freq,
						 efficiencies);
		break;
	case CPUFREQ_RELATION_H:
		idx = cpufreq_table_find_index_h(policy, target_freq,
						 efficiencies);
		break;
	case CPUFREQ_RELATION_C:
		idx = cpufreq_table_find_index_c(policy, target_freq,
						 efficiencies);
		break;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}

	/* Nothing efficient matched: retry over the whole table. */
	if (idx < 0 && efficiencies) {
		efficiencies = false;
		goto retry;
	}

	return idx;
}
1066
1067 static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
1068 {
1069 struct cpufreq_frequency_table *pos;
1070 int count = 0;
1071
1072 if (unlikely(!policy->freq_table))
1073 return 0;
1074
1075 cpufreq_for_each_valid_entry(pos, policy->freq_table)
1076 count++;
1077
1078 return count;
1079 }
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
 * @policy: the policy containing the inefficient frequency
 * @frequency: the inefficient frequency, in kHz
 *
 * The frequency-table entry matching @frequency is marked
 * CPUFREQ_INEFFICIENT_FREQ and the policy advertises that efficiencies
 * are available, so CPUFREQ_RELATION_E lookups can skip it.
 *
 * Return: 0 on success, -EINVAL if the table is unsorted or @frequency
 * is not found in it.
 */
static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	struct cpufreq_frequency_table *pos;

	/* Not supported for unsorted tables (lookups rely on sort order). */
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
		return -EINVAL;

	cpufreq_for_each_valid_entry(pos, policy->freq_table) {
		if (pos->frequency == frequency) {
			pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
			policy->efficiencies_available = true;
			return 0;
		}
	}

	return -EINVAL;
}
1111
1112 static inline int parse_perf_domain(int cpu, const char *list_name,
1113 const char *cell_name)
1114 {
1115 struct device_node *cpu_np;
1116 struct of_phandle_args args;
1117 int ret;
1118
1119 cpu_np = of_cpu_device_node_get(cpu);
1120 if (!cpu_np)
1121 return -ENODEV;
1122
1123 ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
1124 &args);
1125 if (ret < 0)
1126 return ret;
1127
1128 of_node_put(cpu_np);
1129
1130 return args.args[0];
1131 }
1132
/*
 * Build the mask of possible CPUs sharing @pcpu's DT performance domain.
 * CPUs whose domain cannot be parsed are simply skipped.
 *
 * Return: @pcpu's domain index on success, or a negative errno if @pcpu's
 * own domain cannot be parsed.  @cpumask is only updated on success.
 */
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask)
{
	int target_idx;
	int cpu, ret;

	ret = parse_perf_domain(pcpu, list_name, cell_name);
	if (ret < 0)
		return ret;

	target_idx = ret;
	cpumask_set_cpu(pcpu, cpumask);

	for_each_possible_cpu(cpu) {
		if (cpu == pcpu)
			continue;

		/* Best effort: ignore CPUs without a parsable domain. */
		ret = parse_perf_domain(cpu, list_name, cell_name);
		if (ret < 0)
			continue;

		if (target_idx == ret)
			cpumask_set_cpu(cpu, cpumask);
	}

	return target_idx;
}
1160 #else
/* !CONFIG_CPU_FREQ: boost and perf-domain facilities are unavailable. */
static inline int cpufreq_boost_trigger_state(int state)
{
	return 0;
}
static inline int cpufreq_boost_enabled(void)
{
	return 0;
}

static inline int cpufreq_enable_boost_support(void)
{
	return -EINVAL;
}

static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
	return false;
}

static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
			      unsigned int frequency)
{
	return -EINVAL;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
						     const char *cell_name, struct cpumask *cpumask)
{
	return -EOPNOTSUPP;
}
1192 #endif
1193
1194 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
1195 void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
1196 struct cpufreq_governor *old_gov);
1197 #else
/* No-op when the energy model or schedutil governor is not built in. */
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
						 struct cpufreq_governor *old_gov) { }
1200 #endif
1201
1202 extern unsigned int arch_freq_get_on_cpu(int cpu);
1203
1204 #ifndef arch_set_freq_scale
/* Default no-op; architectures override arch_set_freq_scale via #define. */
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
			 unsigned long cur_freq,
			 unsigned long max_freq)
{
}
1211 #endif
1212
1213 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
1214 extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
1215 extern struct freq_attr *cpufreq_generic_attr[];
1216 int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
1217
1218 unsigned int cpufreq_generic_get(unsigned int cpu);
1219 void cpufreq_generic_init(struct cpufreq_policy *policy,
1220 struct cpufreq_frequency_table *table,
1221 unsigned int transition_latency);
1222
/*
 * Helper usable as a driver's ->register_em() callback: registers the
 * energy model from OPP data for all CPUs related to the policy.
 */
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
				  policy->related_cpus);
}
1228 #endif