/*
 * Devices PM QoS constraints management
 *
 * This file exposes the kernel-space interface for specifying per-device
 * PM QoS dependencies.  It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based.  Dependents register their QoS
 * needs, and watchers register to keep track of the current QoS needs of
 * the system.  Watchers can register a per-device notification callback
 * using the dev_pm_qos_*_notifier API.  The notification chain data is
 * stored in the per-device constraint data struct.
 *
 * Notes about the per-device constraint data struct allocation:
 *  . The per-device constraints data struct pointer is stored in the
 *    device's dev_pm_info.
 *  . To minimize the data usage by the per-device constraints, the data
 *    struct is only allocated at the first call to dev_pm_qos_add_request().
 *  . The data is later freed when the device is removed from the system.
 *  . A global mutex protects the constraints users from the data being
 *    allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have
	 * been exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update the constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics.  It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
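
/*
 * Minimal usage sketch (illustrative only; the foo_* names are hypothetical).
 * A caller typically embeds a struct dev_pm_qos_request in its own data and
 * keeps the handle for later updates and removal:
 *
 *	#include <linux/pm_qos.h>
 *
 *	struct foo_data {
 *		struct dev_pm_qos_request lat_req;
 *	};
 *
 *	static int foo_bind(struct device *dev, struct foo_data *fd)
 *	{
 *		// Cap the device resume latency at 100 us while bound.
 *		return dev_pm_qos_add_request(dev, &fd->lat_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *	}
 */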

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value.  Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
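
/*
 * Example request lifecycle (illustrative sketch; the fd->lat_req handle is
 * the hypothetical request added in the sketch above).  Once added, the same
 * handle is reused for updates and must be removed before its storage goes
 * away:
 *
 *	// Tighten the limit to 20 us while the device is in heavy use.
 *	dev_pm_qos_update_request(&fd->lat_req, 20);
 *
 *	// Drop the constraint, e.g. on unbind.
 *	dev_pm_qos_remove_request(&fd->lat_req);
 *
 * Both calls return 1 if the aggregate constraint changed, 0 if it did not,
 * or a negative error code.
 */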

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
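
/*
 * Watcher sketch (illustrative only; the foo_* names are hypothetical).  The
 * callback runs from a blocking notifier chain whenever the aggregated target
 * value changes; the new target value is normally passed as the notifier's
 * unsigned long argument:
 *
 *	static int foo_lat_notify(struct notifier_block *nb,
 *				  unsigned long new_value, void *unused)
 *	{
 *		pr_info("resume latency target is now %lu us\n", new_value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_lat_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_RESUME_LATENCY);
 */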

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit in user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now. */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);