/*
 * Power Management Quality of Service (PM QoS) support base.
 *
 * Infrastructure for expressing PM QoS dependencies: entities that depend on
 * QoS constraints register requests, which are aggregated into an effective
 * constraint (target value) that other entities can read and, where notifiers
 * are set up, be notified about when it changes.
 *
 * This file covers the generic constraint and flags aggregation helpers, the
 * system-wide CPU latency QoS (also exposed to user space through the
 * /dev/cpu_dma_latency misc device) and the frequency QoS (minimum/maximum
 * frequency requests and notifiers).
 */
#include <linux/pm_qos.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/uaccess.h>
#include <linux/export.h>
#include <trace/events/power.h>

/*
 * Locking rule: all changes to constraints or notifiers lists need to happen
 * with pm_qos_lock held, taken with _irqsave.  One lock to rule them all.
 */
static DEFINE_SPINLOCK(pm_qos_lock);
/**
 * pm_qos_read_value - Return the current effective constraint value.
 * @c: List of PM QoS constraint requests.
 */
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
	return READ_ONCE(c->target_value);
}

/*
 * Compute the aggregate value of a constraints list: the smallest request
 * wins for PM_QOS_MIN constraints and the largest one for PM_QOS_MAX.
 */
static int pm_qos_get_value(struct pm_qos_constraints *c)
{
	if (plist_head_empty(&c->list))
		return c->no_constraint_value;

	switch (c->type) {
	case PM_QOS_MIN:
		return plist_first(&c->list)->prio;

	case PM_QOS_MAX:
		return plist_last(&c->list)->prio;

	default:
		WARN(1, "Unknown PM QoS type in %s\n", __func__);
		return PM_QOS_DEFAULT_VALUE;
	}
}

static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
	WRITE_ONCE(c->target_value, value);
}

/**
 * pm_qos_update_target - Update a list of PM QoS constraint requests.
 * @c: List of PM QoS requests.
 * @node: Target list entry.
 * @action: Action to carry out (add, update or remove).
 * @value: New request value for the target list entry.
 *
 * Update the given list of PM QoS constraint requests, @c, by carrying out an
 * @action involving the @node list entry and @value on it.
 *
 * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
 * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list,
 * store @value in it and add it back) and PM_QOS_REMOVE_REQ (remove @node from
 * the list, ignore @value).
 *
 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
 */
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value)
{
	int prev_value, curr_value, new_value;
	unsigned long flags;

	spin_lock_irqsave(&pm_qos_lock, flags);

	prev_value = pm_qos_get_value(c);
	if (value == PM_QOS_DEFAULT_VALUE)
		new_value = c->default_value;
	else
		new_value = value;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		plist_del(node, &c->list);
		break;
	case PM_QOS_UPDATE_REQ:
		/*
		 * To change the list, remove the entry, reinitialize it with
		 * the new value and add it back, then check whether the
		 * aggregate value has changed.
		 */
		plist_del(node, &c->list);
		fallthrough;
	case PM_QOS_ADD_REQ:
		plist_node_init(node, new_value);
		plist_add(node, &c->list);
		break;
	default:
		/* no action */
		;
	}

	curr_value = pm_qos_get_value(c);
	pm_qos_set_value(c, curr_value);

	spin_unlock_irqrestore(&pm_qos_lock, flags);

	trace_pm_qos_update_target(action, prev_value, curr_value);

	if (prev_value == curr_value)
		return 0;

	if (c->notifiers)
		blocking_notifier_call_chain(c->notifiers, curr_value, NULL);

	return 1;
}

/**
 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
 * @pqf: Device PM QoS flags set to remove the request from.
 * @req: Request to remove from the set.
 *
 * Drop @req from the list and recompute the effective flags as the OR of the
 * remaining requests.
 */
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
				    struct pm_qos_flags_request *req)
{
	s32 val = 0;

	list_del(&req->node);
	list_for_each_entry(req, &pqf->list, node)
		val |= req->flags;

	pqf->effective_flags = val;
}

/**
 * pm_qos_update_flags - Update a set of PM QoS flags.
 * @pqf: Set of PM QoS flags to update.
 * @req: Request to add to the set, to modify, or to remove from the set.
 * @action: Action to take on the set.
 * @val: Value of the request to add or modify.
 *
 * Return: 'true' if the effective flags have changed, 'false' otherwise.
 */
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val)
{
	unsigned long irqflags;
	s32 prev_value, curr_value;

	spin_lock_irqsave(&pm_qos_lock, irqflags);

	prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		break;
	case PM_QOS_UPDATE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		fallthrough;
	case PM_QOS_ADD_REQ:
		req->flags = val;
		INIT_LIST_HEAD(&req->node);
		list_add_tail(&req->node, &pqf->list);
		pqf->effective_flags |= val;
		break;
	default:
		/* no action */
		;
	}

	curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	spin_unlock_irqrestore(&pm_qos_lock, irqflags);

	trace_pm_qos_update_flags(action, prev_value, curr_value);

	return prev_value != curr_value;
}

#ifdef CONFIG_CPU_IDLE

/* Definitions related to the CPU latency QoS. */

static struct pm_qos_constraints cpu_latency_constraints = {
	.list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
	.target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
};

/**
 * cpu_latency_qos_limit - Return the current system-wide CPU latency QoS limit.
 */
s32 cpu_latency_qos_limit(void)
{
	return pm_qos_read_value(&cpu_latency_constraints);
}

/**
 * cpu_latency_qos_request_active - Check the given PM QoS request.
 * @req: PM QoS request to check.
 *
 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
 * otherwise.
 */
bool cpu_latency_qos_request_active(struct pm_qos_request *req)
{
	return req->qos == &cpu_latency_constraints;
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);

static void cpu_latency_qos_apply(struct pm_qos_request *req,
				  enum pm_qos_req_action action, s32 value)
{
	int ret = pm_qos_update_target(req->qos, &req->node, action, value);

	if (ret > 0)
		wake_up_all_idle_cpus();
}

/**
 * cpu_latency_qos_add_request - Add new CPU latency QoS request.
 * @req: Pointer to a preallocated handle.
 * @value: Requested constraint value.
 *
 * Use @value to initialize the request handle pointed to by @req, insert it as
 * a new entry to the CPU latency QoS list and recompute the effective QoS
 * constraint for that list.
 *
 * Callers need to save the handle for later use in updates and removal of the
 * QoS request represented by it.
 */
void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
{
	if (!req)
		return;

	if (cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for already added request\n", __func__);
		return;
	}

	trace_pm_qos_add_request(value);

	req->qos = &cpu_latency_constraints;
	cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);

/**
 * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
 * @req: QoS request to update.
 * @new_value: New requested constraint value.
 *
 * Use @new_value to update the QoS request represented by @req in the CPU
 * latency QoS list along with updating the effective constraint value for
 * that list.
 */
void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
{
	if (!req)
		return;

	if (!cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
		return;
	}

	trace_pm_qos_update_request(new_value);

	if (new_value == req->node.prio)
		return;

	cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);

/**
 * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
 * @req: QoS request to remove.
 *
 * Remove the CPU latency QoS request represented by @req from the CPU latency
 * QoS list along with updating the effective constraint value for that list.
 */
void cpu_latency_qos_remove_request(struct pm_qos_request *req)
{
	if (!req)
		return;

	if (!cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
		return;
	}

	trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);

	cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
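
/*
 * Illustrative in-kernel usage of the CPU latency QoS API above (a sketch for
 * documentation purposes only; the request name and the latency bounds are
 * made-up example values, not taken from this file):
 *
 *	static struct pm_qos_request my_latency_req;
 *
 *	cpu_latency_qos_add_request(&my_latency_req, 20);
 *	...
 *	cpu_latency_qos_update_request(&my_latency_req, 50);
 *	...
 *	cpu_latency_qos_remove_request(&my_latency_req);
 */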

/* User space interface to the CPU latency QoS via the cpu_dma_latency misc device. */

static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
{
	struct pm_qos_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
	filp->private_data = req;

	return 0;
}

static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
{
	struct pm_qos_request *req = filp->private_data;

	filp->private_data = NULL;

	cpu_latency_qos_remove_request(req);
	kfree(req);

	return 0;
}

static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *f_pos)
{
	struct pm_qos_request *req = filp->private_data;
	unsigned long flags;
	s32 value;

	if (!req || !cpu_latency_qos_request_active(req))
		return -EINVAL;

	spin_lock_irqsave(&pm_qos_lock, flags);
	value = pm_qos_get_value(&cpu_latency_constraints);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}

static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *f_pos)
{
	s32 value;

	if (count == sizeof(s32)) {
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else {
		int ret;

		ret = kstrtos32_from_user(buf, count, 16, &value);
		if (ret)
			return ret;
	}

	cpu_latency_qos_update_request(filp->private_data, value);

	return count;
}

static const struct file_operations cpu_latency_qos_fops = {
	.write = cpu_latency_qos_write,
	.read = cpu_latency_qos_read,
	.open = cpu_latency_qos_open,
	.release = cpu_latency_qos_release,
	.llseek = noop_llseek,
};

static struct miscdevice cpu_latency_qos_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "cpu_dma_latency",
	.fops = &cpu_latency_qos_fops,
};

static int __init cpu_latency_qos_init(void)
{
	int ret;

	ret = misc_register(&cpu_latency_qos_miscdev);
	if (ret < 0)
		pr_err("%s: %s setup failed\n", __func__,
		       cpu_latency_qos_miscdev.name);

	return ret;
}
late_initcall(cpu_latency_qos_init);
#endif /* CONFIG_CPU_IDLE */

/* Definitions related to the frequency QoS below. */

/**
 * freq_constraints_init - Initialize frequency QoS constraints.
 * @qos: Frequency QoS constraints to initialize.
 */
void freq_constraints_init(struct freq_constraints *qos)
{
	struct pm_qos_constraints *c;

	c = &qos->min_freq;
	plist_head_init(&c->list);
	c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	/* The effective minimum is the largest of the min-frequency requests. */
	c->type = PM_QOS_MAX;
	c->notifiers = &qos->min_freq_notifiers;
	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);

	c = &qos->max_freq;
	plist_head_init(&c->list);
	c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	/* The effective maximum is the smallest of the max-frequency requests. */
	c->type = PM_QOS_MIN;
	c->notifiers = &qos->max_freq_notifiers;
	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
}

/**
 * freq_qos_read_value - Get frequency QoS constraint for a given list.
 * @qos: Constraints to evaluate.
 * @type: QoS request type.
 */
s32 freq_qos_read_value(struct freq_constraints *qos,
			enum freq_qos_req_type type)
{
	s32 ret;

	switch (type) {
	case FREQ_QOS_MIN:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MIN_DEFAULT_VALUE :
			pm_qos_read_value(&qos->min_freq);
		break;
	case FREQ_QOS_MAX:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MAX_DEFAULT_VALUE :
			pm_qos_read_value(&qos->max_freq);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	return ret;
}

/**
 * freq_qos_apply - Add/modify/remove frequency QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Return: 1 if the effective constraint value has changed, 0 if it has not
 * changed, or a negative error code on failures.
 */
int freq_qos_apply(struct freq_qos_request *req,
		   enum pm_qos_req_action action, s32 value)
{
	int ret;

	switch (req->type) {
	case FREQ_QOS_MIN:
		ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
					   action, value);
		break;
	case FREQ_QOS_MAX:
		ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
					   action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/**
 * freq_qos_add_request - Insert new frequency QoS request into a given list.
 * @qos: Constraints to update.
 * @req: Preallocated request object.
 * @type: Request type.
 * @value: Request value.
 *
 * Insert a new entry into the @qos list of requests, recompute the effective
 * QoS constraint value for that list and initialize the @req object.  The
 * caller needs to save that object for later use in updates and removal.
 *
 * Return: 1 if the effective constraint value has changed, 0 if it has not
 * changed, or a negative error code on failures.
 */
int freq_qos_add_request(struct freq_constraints *qos,
			 struct freq_qos_request *req,
			 enum freq_qos_req_type type, s32 value)
{
	int ret;

	if (IS_ERR_OR_NULL(qos) || !req || value < 0)
		return -EINVAL;

	if (WARN(freq_qos_request_active(req),
		 "%s() called for active request\n", __func__))
		return -EINVAL;

	req->qos = qos;
	req->type = type;
	ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
	if (ret < 0) {
		req->qos = NULL;
		req->type = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_add_request);

/**
 * freq_qos_update_request - Modify existing frequency QoS request.
 * @req: Request to modify.
 * @new_value: New request value.
 *
 * Update an existing frequency QoS request along with the effective constraint
 * value for the list of requests it belongs to.
 *
 * Return: 1 if the effective constraint value has changed, 0 if it has not
 * changed, or a negative error code on failures.
 */
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
{
	if (!req || new_value < 0)
		return -EINVAL;

	if (WARN(!freq_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (req->pnode.prio == new_value)
		return 0;

	return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
}
EXPORT_SYMBOL_GPL(freq_qos_update_request);

/**
 * freq_qos_remove_request - Remove frequency QoS request from its list.
 * @req: Request to remove.
 *
 * Remove the given frequency QoS request from the list of constraints it
 * belongs to and recompute the effective constraint value for that list.
 *
 * Return: 1 if the effective constraint value has changed, 0 if it has not
 * changed, or a negative error code on failures.
 */
int freq_qos_remove_request(struct freq_qos_request *req)
{
	int ret;

	if (!req)
		return -EINVAL;

	if (WARN(!freq_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	req->qos = NULL;
	req->type = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_remove_request);
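
/*
 * Illustrative usage of the frequency QoS request API above (a sketch for
 * documentation purposes only; the request name and the example values are
 * made up, and "constraints" stands for whatever struct freq_constraints
 * pointer the caller actually owns):
 *
 *	static struct freq_qos_request my_min_freq_req;
 *
 *	ret = freq_qos_add_request(constraints, &my_min_freq_req,
 *				   FREQ_QOS_MIN, 800000);
 *	...
 *	freq_qos_update_request(&my_min_freq_req, 1200000);
 *	...
 *	freq_qos_remove_request(&my_min_freq_req);
 */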

/**
 * freq_qos_add_notifier - Add frequency QoS change notifier.
 * @qos: List of requests to add the notifier to.
 * @type: Request type.
 * @notifier: Notifier block to add.
 */
int freq_qos_add_notifier(struct freq_constraints *qos,
			  enum freq_qos_req_type type,
			  struct notifier_block *notifier)
{
	int ret;

	if (IS_ERR_OR_NULL(qos) || !notifier)
		return -EINVAL;

	switch (type) {
	case FREQ_QOS_MIN:
		ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
						       notifier);
		break;
	case FREQ_QOS_MAX:
		ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
						       notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_add_notifier);

/**
 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
 * @qos: List of requests to remove the notifier from.
 * @type: Request type.
 * @notifier: Notifier block to remove.
 */
int freq_qos_remove_notifier(struct freq_constraints *qos,
			     enum freq_qos_req_type type,
			     struct notifier_block *notifier)
{
	int ret;

	if (IS_ERR_OR_NULL(qos) || !notifier)
		return -EINVAL;

	switch (type) {
	case FREQ_QOS_MIN:
		ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
							 notifier);
		break;
	case FREQ_QOS_MAX:
		ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
							 notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
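
/*
 * Illustrative notifier usage (a sketch for documentation purposes only; the
 * callback and variable names are made up).  The "value" argument passed to
 * the callback is the new effective constraint value:
 *
 *	static int my_min_freq_notify(struct notifier_block *nb,
 *				      unsigned long value, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_min_freq_notify,
 *	};
 *
 *	freq_qos_add_notifier(qos, FREQ_QOS_MIN, &my_nb);
 *	...
 *	freq_qos_remove_notifier(qos, FREQ_QOS_MIN, &my_nb);
 */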