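/*
 * CPUFreq governor based on scheduler-provided CPU utilization data
 * ("schedutil").
 */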
#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool limits_changed;
        bool need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
        unsigned int cpu;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        u64 last_update;

        unsigned long util;
        unsigned long bw_dl;
        unsigned long max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
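/************************ Governor internals ***********************/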
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;
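        /*
         * Drivers cannot in general handle cross-CPU requests, so bail out
         * if this CPU is not allowed to act on behalf of the target policy.
         * This also keeps CPUs that are going offline from leaving stale
         * IRQ work items behind on slow-switching platforms.
         */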
        if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->need_freq_update)
                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
        else if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}
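/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */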
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        util = map_util_perf(util);
        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);

        sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
                                          FREQUENCY_UTIL, NULL);
}
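/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */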
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}
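/**
 * sugov_iowait_boost() - Update the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */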
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
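/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it's instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks waiting on IO frequently, while
 * being more conservative about tasks doing only sporadic IO operations.
 */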
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
        boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
        if (sg_cpu->util < boost)
                sg_cpu->util = boost;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */
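/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */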
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
                                              u64 time, unsigned int flags)
{
        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
                return false;

        sugov_get_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time);

        return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int cached_freq = sg_policy->cached_raw_freq;
        unsigned int next_f;

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
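        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         *
         * Except when the rq is capped by uclamp_max.
         */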
        if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
            sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Restore cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = cached_freq;
        }

        if (!sugov_update_next_freq(sg_policy, time, next_f))
                return;
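        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */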
        if (sg_policy->policy->fast_switch_enabled) {
                cpufreq_driver_fast_switch(sg_policy->policy, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        unsigned long prev_util = sg_cpu->util;
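        /*
         * Fall back to the "frequency" path if frequency invariance is not
         * supported, because the direct mapping between the utilization and
         * the performance levels depends on the frequency invariance.
         */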
        if (!arch_scale_freq_invariant()) {
                sugov_update_single_freq(hook, time, flags);
                return;
        }

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;
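        /*
         * Do not reduce the performance level if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         *
         * Except when the rq is capped by uclamp_max.
         */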
        if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
            sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;

        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
                                   map_util_perf(sg_cpu->util), sg_cpu->max);

        sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time);
                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (!sugov_update_next_freq(sg_policy, time, next_f))
                        goto unlock;

                if (sg_policy->policy->fast_switch_enabled)
                        cpufreq_driver_fast_switch(sg_policy->policy, next_f);
                else
                        sugov_deferred_update(sg_policy);
        }
unlock:
        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;
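        /*
         * Hold sg_policy->update_lock briefly so that reading next_freq and
         * clearing work_in_progress cannot race with sugov_deferred_update()
         * updating next_freq; otherwise a freshly requested update could be
         * missed.
         *
         * If new work is queued after the lock is released, sugov_work() will
         * simply be called again by the kthread_work code and will pick up
         * the new request before the sugov thread sleeps.
         */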
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
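/************************** sysfs interface ************************/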
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
        struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

        kfree(to_sugov_tunables(attr_set));
}

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
        .release = &sugov_tunables_free,
};
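/********************** cpufreq governor interface *********************/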
struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size = sizeof(struct sched_attr),
                .sched_policy = SCHED_DEADLINE,
                .sched_flags = SCHED_FLAG_SUGOV,
                .sched_nice = 0,
                .sched_priority = 0,
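                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */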
                .sched_runtime = 1000000,
                .sched_deadline = 10000000,
                .sched_period = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_clear_global_tunables(void)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_clear_global_tunables();

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_clear_global_tunables();

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->cached_raw_freq = 0;

        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        if (policy_is_shared(policy))
                uu = sugov_update_shared;
        else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
                uu = sugov_update_single_perf;
        else
                uu = sugov_update_single_freq;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
        .name = "schedutil",
        .owner = THIS_MODULE,
        .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .init = sugov_init,
        .exit = sugov_exit,
        .start = sugov_start,
        .stop = sugov_stop,
        .limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
        rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
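/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */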
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif /* CONFIG_ENERGY_MODEL */