// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/*
 * sampling_rate_store - update the sampling rate, effective immediately
 * if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new value is 10 ms
 * (say, because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether a higher frequency will be required),
 * the governor might not act on the new rate for up to 1 second.  Thus,
 * when reducing the sampling rate, make the new value effective
 * immediately.
 *
 * This must be called with the gov_attr_set mutex held, otherwise
 * traversing policy_list isn't safe.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under the gov_attr_set mutex, so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);

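/*
 * A governor typically wires this handler into its sysfs attribute set via
 * the helper macros from cpufreq_governor.h, roughly as follows (a sketch
 * only; see cpufreq_ondemand.c for a complete example):
 *
 *	gov_show_one_common(sampling_rate);
 *	gov_attr_rw(sampling_rate);
 */
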
/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or several of them if the governor tunables
 * are system-wide).
 *
 * Call under the gov_attr_set mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

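/**
 * dbs_update - Compute the aggregate load for a policy.
 * @policy: cpufreq policy to compute the load for.
 *
 * Compute the load (in percent) of every CPU in @policy over the time
 * elapsed since the previous sample and return the maximum of those values.
 * As a side effect, store in policy_dbs->idle_periods the smallest number
 * of sampling periods covered by the idle time of a CPU that was idle for
 * longer than twice the effective sampling rate (or UINT_MAX if there is
 * no such CPU).
 */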
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purposes of ondemand, waiting for disk IO is an indication
	 * that the workload is performance-critical, not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * in that case.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get the absolute load. */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between
			 * the calls, so the previous load value can be used
			 * then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU-intensive that task actually
			 * was.  This is undesirable for latency-sensitive
			 * bursty workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency.  However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

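/*
 * Process-context part of taking a sample: run the governor-specific update
 * and re-arm the sample delay, then let the utilization update handler queue
 * up more work.
 */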
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

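/*
 * Runs in hard interrupt context; bounce the actual sample processing to
 * process context by scheduling dbs_work_handler() on the local CPU.
 */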
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

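/*
 * Utilization update callback, invoked from scheduler paths.  Check whether
 * the sample delay has elapsed and, if so, queue up irq_work to take a new
 * sample, making sure that only one CPU sharing the policy does so.
 */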
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

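/*
 * Set the initial sample delay and install dbs_update_util_handler() as the
 * utilization update callback for all CPUs covered by the policy.
 */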
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

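/*
 * Remove the utilization update callbacks.  They are dereferenced under RCU,
 * so wait for in-flight invocations to complete before the data they use can
 * go away.
 */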
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online and offline. */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

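/*
 * Free the governor tunables when the last reference to the kobject embedded
 * in them is dropped.
 */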
static void cpufreq_dbs_data_release(struct kobject *kobj)
{
	struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
	struct dbs_governor *gov = dbs_data->gov;

	gov->exit(dbs_data);
	kfree(dbs_data);
}

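/**
 * cpufreq_dbs_governor_init - Common initialization for dbs governors.
 * @policy: cpufreq policy to initialize the governor for.
 *
 * Allocate the per-policy governor data and bind it to a set of tunables:
 * either the already existing system-wide set or a newly allocated one
 * exposed via sysfs.
 */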
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* The state should be equivalent to EXIT. */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

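/**
 * cpufreq_dbs_governor_exit - Common exit logic for dbs governors.
 * @policy: cpufreq policy to exit the governor for.
 *
 * Drop the policy's reference to the governor tunables (which frees them
 * if it was the last one) and free the per-policy governor data.
 */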
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

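/**
 * cpufreq_dbs_governor_start - Common start logic for dbs governors.
 * @policy: cpufreq policy to start the governor for.
 *
 * Reset the per-CPU sampling state, let the governor initialize itself and
 * install the utilization update hooks that drive sampling.
 */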
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/* Make the first invocation of dbs_update() compute the load. */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

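/**
 * cpufreq_dbs_governor_stop - Common stop logic for dbs governors.
 * @policy: cpufreq policy to stop the governor for.
 *
 * Remove the utilization update hooks and make sure no sampling work is
 * running or queued before returning.
 */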
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

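/**
 * cpufreq_dbs_governor_limits - Common limits-update logic for dbs governors.
 * @policy: cpufreq policy whose limits have changed.
 *
 * Apply the updated policy limits and force a sample to be taken at the
 * next utilization update.
 */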
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit(). */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);