0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0015
0016 #include <linux/export.h>
0017 #include <linux/kernel_stat.h>
0018 #include <linux/slab.h>
0019
0020 #include "cpufreq_governor.h"
0021
0022 #define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
0023
0024 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
0025
0026 static DEFINE_MUTEX(gov_dbs_data_mutex);
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
/*
 * sampling_rate_store - sysfs store callback for the sampling_rate tunable.
 * @attr_set: attribute set embedded in the dbs_data being written to.
 * @buf: user-supplied decimal string with the new interval (microseconds).
 * @count: number of bytes in @buf.
 *
 * Rejects values below CPUFREQ_DBS_MIN_SAMPLING_INTERVAL.  After storing
 * the new rate, walks every policy attached to this dbs_data and zeroes
 * its sample delay so the new rate takes effect on the next governor
 * update instead of after the previously programmed delay expires.
 *
 * Returns @count on success, -EINVAL on malformed or too-small input.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * Make the new sampling rate take effect right away for every policy
	 * sharing this dbs_data, rather than when the currently scheduled
	 * sample delay elapses.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * update_mutex is held, so this cannot race with a governor
		 * update in progress for the same policy (dbs_work_handler()
		 * takes the same mutex around gov_update_sample_delay()).
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
/*
 * gov_update_cpu_data - refresh the per-CPU baselines used by dbs_update().
 * @dbs_data: tunables whose io_is_busy / ignore_nice_load settings changed.
 *
 * Re-reads the current idle time (and, when nice load is ignored, the
 * current nice time) for every CPU of every policy attached to @dbs_data,
 * so the next load estimate is computed against values gathered under the
 * new tunable settings rather than stale ones.
 *
 * NOTE(review): the policy list is walked without taking a lock here, so
 * callers presumably hold whatever lock serializes list updates — confirm
 * at call sites.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			/* Snapshot idle time honoring the new io_is_busy value. */
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
0113
/*
 * dbs_update - compute the current load estimate for @policy.
 * @policy: cpufreq policy whose CPUs are to be sampled.
 *
 * For each CPU in the policy, compare the idle time and the wall time that
 * elapsed since the previous sample and convert them into a load
 * percentage; the maximum over all CPUs is returned.  Also stores in
 * policy_dbs->idle_periods the smallest number of whole sampling periods
 * any CPU has just spent idle (UINT_MAX when no CPU was idle for more
 * than two periods), for governors that scale frequency down over long
 * idle stretches.
 *
 * Returns the maximum per-CPU load percentage (0..100).
 */
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * rate_mult lets a governor stretch its sample delay temporarily;
	 * fold it into the rate used for the "long idle" comparisons below
	 * so they match the delay actually in effect.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;

	/* Whether time waiting for I/O counts as busy time. */
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		/* Deltas since the previous sample (truncated to 32 bits). */
		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			/* Count time spent in nice tasks as idle time. */
			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between
			 * the calls, so the previous load value can be used
			 * then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * The CPU has just come out of a long idle stretch:
			 * the measured load would be near zero even though
			 * the wakeup suggests it is about to get busy again.
			 * Reuse the previous load once, and clear the stored
			 * value so that a second long-idle sample in a row
			 * does report the genuinely low load.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * idle_time exceeding time_elapsed means the
				 * two counters were not sampled atomically
				 * (or nice time was folded into idle_time
				 * above).  A wrapped-negative idle delta
				 * indicates the CPU was fully busy; otherwise
				 * treat it as fully idle.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		/* Track the minimum number of whole idle periods across CPUs. */
		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
0230 EXPORT_SYMBOL_GPL(dbs_update);
0231
/*
 * dbs_work_handler - process-context part of a governor sample.
 *
 * Runs the governor's gov_dbs_update() callback under update_mutex,
 * programs the next sample delay from its return value, and then
 * re-enables the scheduler-context fast path.
 */
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * update_mutex serializes this against other users of the sample
	 * delay (e.g. cpufreq_dbs_governor_limits() and the sysfs stores).
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below were reordered before the sample delay
	 * modification above, the utilization update handler could observe
	 * work_in_progress == false while still reading a stale
	 * sample_delay_ns (it pairs with the smp_rmb() in
	 * dbs_update_util_handler()).
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
0260
0261 static void dbs_irq_work(struct irq_work *irq_work)
0262 {
0263 struct policy_dbs_info *policy_dbs;
0264
0265 policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
0266 schedule_work_on(smp_processor_id(), &policy_dbs->work);
0267 }
0268
/*
 * dbs_update_util_handler - scheduler utilization-update hook.
 * @data: per-CPU update_util_data embedded in this CPU's cpu_dbs_info.
 * @time: current time, compared against last_sample_time.
 * @flags: update flags from the scheduler (unused here).
 *
 * Lockless fast path: decides whether it is time to take a new sample
 * and, if so, raises the irq_work that schedules dbs_work_handler().
 */
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now because a
	 * previously queued sample is still being processed.
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below were reordered before the check above, the
	 * sample_delay_ns value used in the comparison could be stale
	 * (pairs with the smp_wmb() in dbs_work_handler()).
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right
	 * away at this point.  Otherwise, only one of the CPUs sharing the
	 * policy may do that.
	 */
	if (policy_dbs->is_shared) {
		/* work_count acts as a 0/1 "ticket": only one CPU wins. */
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime,
		 * we lost the race; release the ticket and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
0321
0322 static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
0323 unsigned int delay_us)
0324 {
0325 struct cpufreq_policy *policy = policy_dbs->policy;
0326 int cpu;
0327
0328 gov_update_sample_delay(policy_dbs, delay_us);
0329 policy_dbs->last_sample_time = 0;
0330
0331 for_each_cpu(cpu, policy->cpus) {
0332 struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
0333
0334 cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
0335 dbs_update_util_handler);
0336 }
0337 }
0338
0339 static inline void gov_clear_update_util(struct cpufreq_policy *policy)
0340 {
0341 int i;
0342
0343 for_each_cpu(i, policy->cpus)
0344 cpufreq_remove_update_util_hook(i);
0345
0346 synchronize_rcu();
0347 }
0348
0349 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
0350 struct dbs_governor *gov)
0351 {
0352 struct policy_dbs_info *policy_dbs;
0353 int j;
0354
0355
0356 policy_dbs = gov->alloc();
0357 if (!policy_dbs)
0358 return NULL;
0359
0360 policy_dbs->policy = policy;
0361 mutex_init(&policy_dbs->update_mutex);
0362 atomic_set(&policy_dbs->work_count, 0);
0363 init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
0364 INIT_WORK(&policy_dbs->work, dbs_work_handler);
0365
0366
0367 for_each_cpu(j, policy->related_cpus) {
0368 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
0369
0370 j_cdbs->policy_dbs = policy_dbs;
0371 }
0372 return policy_dbs;
0373 }
0374
0375 static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
0376 struct dbs_governor *gov)
0377 {
0378 int j;
0379
0380 mutex_destroy(&policy_dbs->update_mutex);
0381
0382 for_each_cpu(j, policy_dbs->policy->related_cpus) {
0383 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
0384
0385 j_cdbs->policy_dbs = NULL;
0386 j_cdbs->update_util.func = NULL;
0387 }
0388 gov->free(policy_dbs);
0389 }
0390
0391 static void cpufreq_dbs_data_release(struct kobject *kobj)
0392 {
0393 struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
0394 struct dbs_governor *gov = dbs_data->gov;
0395
0396 gov->exit(dbs_data);
0397 kfree(dbs_data);
0398 }
0399
/*
 * cpufreq_dbs_governor_init - GOVERNOR_INIT callback for dbs governors.
 * @policy: policy the governor is being attached to.
 *
 * Allocates the per-policy data and either attaches to an already existing
 * dbs_data (system-wide tunables case) or creates a fresh one, runs the
 * governor's init() hook, and exposes the tunables via sysfs.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * created state is rolled back.
 */
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT (no governor data attached). */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		/* A shared dbs_data only makes sense without per-policy tunables. */
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		/* Take a reference and join the existing attribute set. */
		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/*
	 * The sampling interval should not be less than the transition
	 * latency of the CPU and it also cannot be too small for
	 * dbs_update() to work correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back everything set up above. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
0487
/*
 * cpufreq_dbs_governor_exit - GOVERNOR_EXIT callback for dbs governors.
 * @policy: policy the governor is being detached from.
 *
 * Drops this policy's reference on the shared dbs_data (releasing it via
 * the kobject when the count hits zero) and frees the per-policy data.
 */
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	/* Last user of a system-wide dbs_data: forget the cached pointer. */
	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
0510
/*
 * cpufreq_dbs_governor_start - GOVERNOR_START callback for dbs governors.
 * @policy: policy to start sampling for.
 *
 * Seeds the per-CPU idle/nice baselines, invokes the governor's start()
 * hook and installs the scheduler utilization hooks, which kicks off
 * periodic sampling.
 *
 * Returns 0 on success, -EINVAL when the current frequency is unknown.
 */
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);

		/*
		 * Make the first invocation of dbs_update() compute the load
		 * from scratch instead of reusing a value left over from a
		 * previous start/stop cycle.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	/* Arm the scheduler hooks last, once all baselines are in place. */
	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
0548
/*
 * cpufreq_dbs_governor_stop - GOVERNOR_STOP callback for dbs governors.
 * @policy: policy to stop sampling for.
 *
 * Tears sampling down in dependency order: first remove the scheduler
 * hooks (so no new irq_work can be raised), then flush the irq_work and
 * the workqueue item, and finally reset the bookkeeping so a later start
 * begins from a clean state.
 */
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
0559 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
0560
/*
 * cpufreq_dbs_governor_limits - GOVERNOR_LIMITS callback for dbs governors.
 * @policy: policy whose min/max limits changed.
 *
 * Clamps the current frequency to the new limits and zeroes the sample
 * delay so the governor re-evaluates the load promptly.
 */
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/*
	 * gov_dbs_data_mutex guards against governor_data being cleared by a
	 * concurrent cpufreq_dbs_governor_exit(); re-read it under the lock.
	 */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);