// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 */
0008 #include <linux/sched/mm.h>
0009 #include <linux/ktime.h>
0010 #include <linux/hrtimer.h>
0011 #include <linux/export.h>
0012 #include <linux/pm_runtime.h>
0013 #include <linux/pm_wakeirq.h>
0014 #include <trace/events/rpm.h>
0015
0016 #include "../base.h"
0017 #include "power.h"
0018
0019 typedef int (*pm_callback_t)(struct device *);
0020
0021 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
0022 {
0023 pm_callback_t cb;
0024 const struct dev_pm_ops *ops;
0025
0026 if (dev->pm_domain)
0027 ops = &dev->pm_domain->ops;
0028 else if (dev->type && dev->type->pm)
0029 ops = dev->type->pm;
0030 else if (dev->class && dev->class->pm)
0031 ops = dev->class->pm;
0032 else if (dev->bus && dev->bus->pm)
0033 ops = dev->bus->pm;
0034 else
0035 ops = NULL;
0036
0037 if (ops)
0038 cb = *(pm_callback_t *)((void *)ops + cb_offset);
0039 else
0040 cb = NULL;
0041
0042 if (!cb && dev->driver && dev->driver->pm)
0043 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
0044
0045 return cb;
0046 }
0047
0048 #define RPM_GET_CALLBACK(dev, callback) \
0049 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
0050
0051 static int rpm_resume(struct device *dev, int rpmflags);
0052 static int rpm_suspend(struct device *dev, int rpmflags);
0053
/*
 * update_pm_runtime_accounting - Update the time accounting of power states.
 * @dev: Device to update the accounting for.
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.  This function must be
 * called before each update of the runtime_status field, so that the time
 * spent in the previous state is accounted for correctly.
 */
0065 static void update_pm_runtime_accounting(struct device *dev)
0066 {
0067 u64 now, last, delta;
0068
0069 if (dev->power.disable_depth > 0)
0070 return;
0071
0072 last = dev->power.accounting_timestamp;
0073
0074 now = ktime_get_mono_fast_ns();
0075 dev->power.accounting_timestamp = now;
0076
/*
 * Because ktime_get_mono_fast_ns() is not monotonic during
 * timekeeping updates, ensure that 'now' is after the last saved
 * timestamp.
 */
0082 if (now < last)
0083 return;
0084
0085 delta = now - last;
0086
0087 if (dev->power.runtime_status == RPM_SUSPENDED)
0088 dev->power.suspended_time += delta;
0089 else
0090 dev->power.active_time += delta;
0091 }
0092
0093 static void __update_runtime_status(struct device *dev, enum rpm_status status)
0094 {
0095 update_pm_runtime_accounting(dev);
0096 dev->power.runtime_status = status;
0097 }
0098
0099 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
0100 {
0101 u64 time;
0102 unsigned long flags;
0103
0104 spin_lock_irqsave(&dev->power.lock, flags);
0105
0106 update_pm_runtime_accounting(dev);
0107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
0108
0109 spin_unlock_irqrestore(&dev->power.lock, flags);
0110
0111 return time;
0112 }
0113
0114 u64 pm_runtime_active_time(struct device *dev)
0115 {
0116 return rpm_get_accounted_time(dev, false);
0117 }
0118
0119 u64 pm_runtime_suspended_time(struct device *dev)
0120 {
0121 return rpm_get_accounted_time(dev, true);
0122 }
0123 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
0124
/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
0129 static void pm_runtime_deactivate_timer(struct device *dev)
0130 {
0131 if (dev->power.timer_expires > 0) {
0132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
0133 dev->power.timer_expires = 0;
0134 }
0135 }
0136
/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
0141 static void pm_runtime_cancel_pending(struct device *dev)
0142 {
0143 pm_runtime_deactivate_timer(dev);
0144
/*
 * In case there's a request pending, make sure its work function will
 * return without doing anything.
 */
0148 dev->power.request = RPM_REQ_NONE;
0149 }
0150
/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in nanoseconds.
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
0163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
0164 {
0165 int autosuspend_delay;
0166 u64 expires;
0167
0168 if (!dev->power.use_autosuspend)
0169 return 0;
0170
0171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
0172 if (autosuspend_delay < 0)
0173 return 0;
0174
0175 expires = READ_ONCE(dev->power.last_busy);
0176 expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
0177 if (expires > ktime_get_mono_fast_ns())
0178 return expires;
0179
0180 return 0;
0181 }
0182 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
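/*
 * Illustrative sketch (not part of this file): a driver using autosuspend
 * typically refreshes power.last_busy right before dropping its usage count,
 * which pushes the expiration computed above further into the future.  The
 * "foo" context below is hypothetical.
 *
 *	static void foo_io_done(struct foo *foo)
 *	{
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *	}
 */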
0183
0184 static int dev_memalloc_noio(struct device *dev, void *data)
0185 {
0186 return dev->power.memalloc_noio;
0187 }
0188
/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  Network
 *     devices can be involved in the same way in iSCSI setups.
 *
 * dev_hotplug_mutex is held in the function for handling the hotplug
 * race, because pm_runtime_set_memalloc_noio() may be called in
 * async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (hotplugging inside the function is not
 * supported).
 */
0217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
0218 {
0219 static DEFINE_MUTEX(dev_hotplug_mutex);
0220
0221 mutex_lock(&dev_hotplug_mutex);
0222 for (;;) {
0223 bool enabled;
0224
/* hold power lock since bitfield is not SMP-safe. */
0226 spin_lock_irq(&dev->power.lock);
0227 enabled = dev->power.memalloc_noio;
0228 dev->power.memalloc_noio = enable;
0229 spin_unlock_irq(&dev->power.lock);
0230
/*
 * No need to enable the ancestors any more if the device
 * has already been enabled.
 */
0235 if (enabled && enable)
0236 break;
0237
0238 dev = dev->parent;
0239
/*
 * Clear the flag on the parent device only if all of its
 * children have the flag cleared, because the ancestors'
 * flag was set by any one of the descendants.
 */
0245 if (!dev || (!enable &&
0246 device_for_each_child(dev, NULL,
0247 dev_memalloc_noio)))
0248 break;
0249 }
0250 mutex_unlock(&dev_hotplug_mutex);
0251 }
0252 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
0253
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
0258 static int rpm_check_suspend_allowed(struct device *dev)
0259 {
0260 int retval = 0;
0261
0262 if (dev->power.runtime_error)
0263 retval = -EINVAL;
0264 else if (dev->power.disable_depth > 0)
0265 retval = -EACCES;
0266 else if (atomic_read(&dev->power.usage_count))
0267 retval = -EAGAIN;
0268 else if (!dev->power.ignore_children &&
0269 atomic_read(&dev->power.child_count))
0270 retval = -EBUSY;
0271
/* Pending resume requests take precedence over suspends. */
0273 else if ((dev->power.deferred_resume
0274 && dev->power.runtime_status == RPM_SUSPENDING)
0275 || (dev->power.request_pending
0276 && dev->power.request == RPM_REQ_RESUME))
0277 retval = -EAGAIN;
0278 else if (__dev_pm_qos_resume_latency(dev) == 0)
0279 retval = -EPERM;
0280 else if (dev->power.runtime_status == RPM_SUSPENDED)
0281 retval = 1;
0282
0283 return retval;
0284 }
0285
0286 static int rpm_get_suppliers(struct device *dev)
0287 {
0288 struct device_link *link;
0289
0290 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
0291 device_links_read_lock_held()) {
0292 int retval;
0293
0294 if (!(link->flags & DL_FLAG_PM_RUNTIME))
0295 continue;
0296
0297 retval = pm_runtime_get_sync(link->supplier);
0298
0299 if (retval < 0 && retval != -EACCES) {
0300 pm_runtime_put_noidle(link->supplier);
0301 return retval;
0302 }
0303 refcount_inc(&link->rpm_active);
0304 }
0305 return 0;
0306 }
0307
/**
 * pm_runtime_release_supplier - Drop references to device link's supplier.
 * @link: Target device link.
 *
 * Drop all runtime PM references associated with @link to its supplier device.
 */
0314 void pm_runtime_release_supplier(struct device_link *link)
0315 {
0316 struct device *supplier = link->supplier;
0317
/*
 * The additional power.usage_count check is a safety net in case
 * the rpm_active refcount becomes saturated, in which case
 * refcount_dec_not_one() would return true forever, but it is not
 * strictly necessary.
 */
0324 while (refcount_dec_not_one(&link->rpm_active) &&
0325 atomic_read(&supplier->power.usage_count) > 0)
0326 pm_runtime_put_noidle(supplier);
0327 }
0328
0329 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
0330 {
0331 struct device_link *link;
0332
0333 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
0334 device_links_read_lock_held()) {
0335 pm_runtime_release_supplier(link);
0336 if (try_to_suspend)
0337 pm_request_idle(link->supplier);
0338 }
0339 }
0340
0341 static void rpm_put_suppliers(struct device *dev)
0342 {
0343 __rpm_put_suppliers(dev, true);
0344 }
0345
0346 static void rpm_suspend_suppliers(struct device *dev)
0347 {
0348 struct device_link *link;
0349 int idx = device_links_read_lock();
0350
0351 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
0352 device_links_read_lock_held())
0353 pm_request_idle(link->supplier);
0354
0355 device_links_read_unlock(idx);
0356 }
0357
/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
0363 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
0364 __releases(&dev->power.lock) __acquires(&dev->power.lock)
0365 {
0366 int retval = 0, idx;
0367 bool use_links = dev->power.links_count > 0;
0368
0369 if (dev->power.irq_safe) {
0370 spin_unlock(&dev->power.lock);
0371 } else {
0372 spin_unlock_irq(&dev->power.lock);
0373
/*
 * Resume suppliers if necessary.
 *
 * The device's runtime PM status cannot change until this
 * routine returns, so it is safe to read the status outside of
 * the lock.
 */
0381 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
0382 idx = device_links_read_lock();
0383
0384 retval = rpm_get_suppliers(dev);
0385 if (retval) {
0386 rpm_put_suppliers(dev);
0387 goto fail;
0388 }
0389
0390 device_links_read_unlock(idx);
0391 }
0392 }
0393
0394 if (cb)
0395 retval = cb(dev);
0396
0397 if (dev->power.irq_safe) {
0398 spin_lock(&dev->power.lock);
0399 } else {
/*
 * If the device is suspending and the callback has returned
 * success, drop the usage counters of the suppliers that have
 * been reference counted on its resume.
 *
 * Do that if resume fails too.
 */
0407 if (use_links
0408 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
0409 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
0410 idx = device_links_read_lock();
0411
0412 __rpm_put_suppliers(dev, false);
0413
0414 fail:
0415 device_links_read_unlock(idx);
0416 }
0417
0418 spin_lock_irq(&dev->power.lock);
0419 }
0420
0421 return retval;
0422 }
0423
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
0437 static int rpm_idle(struct device *dev, int rpmflags)
0438 {
0439 int (*callback)(struct device *);
0440 int retval;
0441
0442 trace_rpm_idle_rcuidle(dev, rpmflags);
0443 retval = rpm_check_suspend_allowed(dev);
0444 if (retval < 0)
0445 ;
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
0448 else if (dev->power.runtime_status != RPM_ACTIVE)
0449 retval = -EAGAIN;
0450
/*
 * Any pending request other than an idle notification takes
 * precedence over us, except that the timer may be running.
 */
0455 else if (dev->power.request_pending &&
0456 dev->power.request > RPM_REQ_IDLE)
0457 retval = -EAGAIN;
0458
/* Act as though RPM_NOWAIT is always set. */
0460 else if (dev->power.idle_notification)
0461 retval = -EINPROGRESS;
0462 if (retval)
0463 goto out;
0464
/* Pending requests need to be canceled. */
0466 dev->power.request = RPM_REQ_NONE;
0467
0468 callback = RPM_GET_CALLBACK(dev, runtime_idle);
0469
/* If there is no callback, assume success. */
0471 if (!callback || dev->power.no_callbacks)
0472 goto out;
0473
/* Carry out an asynchronous or a synchronous idle notification. */
0475 if (rpmflags & RPM_ASYNC) {
0476 dev->power.request = RPM_REQ_IDLE;
0477 if (!dev->power.request_pending) {
0478 dev->power.request_pending = true;
0479 queue_work(pm_wq, &dev->power.work);
0480 }
0481 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
0482 return 0;
0483 }
0484
0485 dev->power.idle_notification = true;
0486
0487 retval = __rpm_callback(callback, dev);
0488
0489 dev->power.idle_notification = false;
0490 wake_up_all(&dev->power.wait_queue);
0491
0492 out:
0493 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
0494 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
0495 }
0496
/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
0502 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
0503 {
0504 int retval;
0505
0506 if (dev->power.memalloc_noio) {
0507 unsigned int noio_flag;
0508
/*
 * Deadlock might be caused if memory allocation with
 * GFP_KERNEL happens inside runtime_suspend and
 * runtime_resume callbacks of one block device's
 * ancestor or the block device itself.  Network
 * devices might be thought of as part of an iSCSI block
 * device, so network devices and their ancestors should
 * be marked as memalloc_noio too.
 */
0518 noio_flag = memalloc_noio_save();
0519 retval = __rpm_callback(cb, dev);
0520 memalloc_noio_restore(noio_flag);
0521 } else {
0522 retval = __rpm_callback(cb, dev);
0523 }
0524
0525 dev->power.runtime_error = retval;
0526 return retval != -EACCES ? retval : -EIO;
0527 }
0528
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
0550 static int rpm_suspend(struct device *dev, int rpmflags)
0551 __releases(&dev->power.lock) __acquires(&dev->power.lock)
0552 {
0553 int (*callback)(struct device *);
0554 struct device *parent = NULL;
0555 int retval;
0556
0557 trace_rpm_suspend_rcuidle(dev, rpmflags);
0558
0559 repeat:
0560 retval = rpm_check_suspend_allowed(dev);
0561 if (retval < 0)
0562 goto out;
0563
/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
0565 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
0566 retval = -EAGAIN;
0567 if (retval)
0568 goto out;
0569
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
0571 if ((rpmflags & RPM_AUTO)
0572 && dev->power.runtime_status != RPM_SUSPENDING) {
0573 u64 expires = pm_runtime_autosuspend_expiration(dev);
0574
0575 if (expires != 0) {
/* Pending requests need to be canceled. */
0577 dev->power.request = RPM_REQ_NONE;
0578
/*
 * Optimization: If the timer is already running and is
 * set to expire at or before the autosuspend delay,
 * avoid the overhead of resetting it.  Just let it
 * expire; pm_suspend_timer_fn() will take care of the
 * rest.
 */
0586 if (!(dev->power.timer_expires &&
0587 dev->power.timer_expires <= expires)) {
/*
 * We add a slack of 25% to gather wakeups
 * without sacrificing the granularity.
 */
0592 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
0593 (NSEC_PER_MSEC >> 2);
0594
0595 dev->power.timer_expires = expires;
0596 hrtimer_start_range_ns(&dev->power.suspend_timer,
0597 ns_to_ktime(expires),
0598 slack,
0599 HRTIMER_MODE_ABS);
0600 }
0601 dev->power.timer_autosuspends = 1;
0602 goto out;
0603 }
0604 }
0605
/* Other scheduled or pending requests need to be canceled. */
0607 pm_runtime_cancel_pending(dev);
0608
0609 if (dev->power.runtime_status == RPM_SUSPENDING) {
0610 DEFINE_WAIT(wait);
0611
0612 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
0613 retval = -EINPROGRESS;
0614 goto out;
0615 }
0616
0617 if (dev->power.irq_safe) {
0618 spin_unlock(&dev->power.lock);
0619
0620 cpu_relax();
0621
0622 spin_lock(&dev->power.lock);
0623 goto repeat;
0624 }
0625
/* Wait for the other suspend running in parallel with us. */
0627 for (;;) {
0628 prepare_to_wait(&dev->power.wait_queue, &wait,
0629 TASK_UNINTERRUPTIBLE);
0630 if (dev->power.runtime_status != RPM_SUSPENDING)
0631 break;
0632
0633 spin_unlock_irq(&dev->power.lock);
0634
0635 schedule();
0636
0637 spin_lock_irq(&dev->power.lock);
0638 }
0639 finish_wait(&dev->power.wait_queue, &wait);
0640 goto repeat;
0641 }
0642
0643 if (dev->power.no_callbacks)
0644 goto no_callback;
0645
/* Carry out an asynchronous or a synchronous suspend. */
0647 if (rpmflags & RPM_ASYNC) {
0648 dev->power.request = (rpmflags & RPM_AUTO) ?
0649 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
0650 if (!dev->power.request_pending) {
0651 dev->power.request_pending = true;
0652 queue_work(pm_wq, &dev->power.work);
0653 }
0654 goto out;
0655 }
0656
0657 __update_runtime_status(dev, RPM_SUSPENDING);
0658
0659 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
0660
0661 dev_pm_enable_wake_irq_check(dev, true);
0662 retval = rpm_callback(callback, dev);
0663 if (retval)
0664 goto fail;
0665
0666 dev_pm_enable_wake_irq_complete(dev);
0667
0668 no_callback:
0669 __update_runtime_status(dev, RPM_SUSPENDED);
0670 pm_runtime_deactivate_timer(dev);
0671
0672 if (dev->parent) {
0673 parent = dev->parent;
0674 atomic_add_unless(&parent->power.child_count, -1, 0);
0675 }
0676 wake_up_all(&dev->power.wait_queue);
0677
0678 if (dev->power.deferred_resume) {
0679 dev->power.deferred_resume = false;
0680 rpm_resume(dev, 0);
0681 retval = -EAGAIN;
0682 goto out;
0683 }
0684
0685 if (dev->power.irq_safe)
0686 goto out;
0687
/* Maybe the parent is now able to suspend. */
0689 if (parent && !parent->power.ignore_children) {
0690 spin_unlock(&dev->power.lock);
0691
0692 spin_lock(&parent->power.lock);
0693 rpm_idle(parent, RPM_ASYNC);
0694 spin_unlock(&parent->power.lock);
0695
0696 spin_lock(&dev->power.lock);
0697 }
/* Maybe the suppliers are now able to suspend. */
0699 if (dev->power.links_count > 0) {
0700 spin_unlock_irq(&dev->power.lock);
0701
0702 rpm_suspend_suppliers(dev);
0703
0704 spin_lock_irq(&dev->power.lock);
0705 }
0706
0707 out:
0708 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
0709
0710 return retval;
0711
0712 fail:
0713 dev_pm_disable_wake_irq_check(dev, true);
0714 __update_runtime_status(dev, RPM_ACTIVE);
0715 dev->power.deferred_resume = false;
0716 wake_up_all(&dev->power.wait_queue);
0717
0718 if (retval == -EAGAIN || retval == -EBUSY) {
0719 dev->power.runtime_error = 0;
0720
/*
 * If the callback routine failed an autosuspend, and
 * if the last_busy time has been updated so that there
 * is a new autosuspend expiration time, automatically
 * reschedule another autosuspend.
 */
0727 if ((rpmflags & RPM_AUTO) &&
0728 pm_runtime_autosuspend_expiration(dev) != 0)
0729 goto repeat;
0730 } else {
0731 pm_runtime_cancel_pending(dev);
0732 }
0733 goto out;
0734 }
0735
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
0753 static int rpm_resume(struct device *dev, int rpmflags)
0754 __releases(&dev->power.lock) __acquires(&dev->power.lock)
0755 {
0756 int (*callback)(struct device *);
0757 struct device *parent = NULL;
0758 int retval = 0;
0759
0760 trace_rpm_resume_rcuidle(dev, rpmflags);
0761
0762 repeat:
0763 if (dev->power.runtime_error) {
0764 retval = -EINVAL;
0765 } else if (dev->power.disable_depth > 0) {
0766 if (dev->power.runtime_status == RPM_ACTIVE &&
0767 dev->power.last_status == RPM_ACTIVE)
0768 retval = 1;
0769 else
0770 retval = -EACCES;
0771 }
0772 if (retval)
0773 goto out;
0774
/*
 * Other scheduled or pending requests need to be canceled.  Small
 * optimization: If an autosuspend timer is running, leave it running
 * rather than cancelling it now only to restart it again in the near
 * future.
 */
0781 dev->power.request = RPM_REQ_NONE;
0782 if (!dev->power.timer_autosuspends)
0783 pm_runtime_deactivate_timer(dev);
0784
0785 if (dev->power.runtime_status == RPM_ACTIVE) {
0786 retval = 1;
0787 goto out;
0788 }
0789
0790 if (dev->power.runtime_status == RPM_RESUMING
0791 || dev->power.runtime_status == RPM_SUSPENDING) {
0792 DEFINE_WAIT(wait);
0793
0794 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
0795 if (dev->power.runtime_status == RPM_SUSPENDING)
0796 dev->power.deferred_resume = true;
0797 else
0798 retval = -EINPROGRESS;
0799 goto out;
0800 }
0801
0802 if (dev->power.irq_safe) {
0803 spin_unlock(&dev->power.lock);
0804
0805 cpu_relax();
0806
0807 spin_lock(&dev->power.lock);
0808 goto repeat;
0809 }
0810
/* Wait for the operation carried out in parallel with us. */
0812 for (;;) {
0813 prepare_to_wait(&dev->power.wait_queue, &wait,
0814 TASK_UNINTERRUPTIBLE);
0815 if (dev->power.runtime_status != RPM_RESUMING
0816 && dev->power.runtime_status != RPM_SUSPENDING)
0817 break;
0818
0819 spin_unlock_irq(&dev->power.lock);
0820
0821 schedule();
0822
0823 spin_lock_irq(&dev->power.lock);
0824 }
0825 finish_wait(&dev->power.wait_queue, &wait);
0826 goto repeat;
0827 }
0828
/*
 * See if we can skip waking up the parent.  This is safe only if
 * power.no_callbacks is set, because otherwise we don't know whether
 * the resume will actually succeed.
 */
0834 if (dev->power.no_callbacks && !parent && dev->parent) {
0835 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
0836 if (dev->parent->power.disable_depth > 0
0837 || dev->parent->power.ignore_children
0838 || dev->parent->power.runtime_status == RPM_ACTIVE) {
0839 atomic_inc(&dev->parent->power.child_count);
0840 spin_unlock(&dev->parent->power.lock);
0841 retval = 1;
0842 goto no_callback;
0843 }
0844 spin_unlock(&dev->parent->power.lock);
0845 }
0846
/* Carry out an asynchronous or a synchronous resume. */
0848 if (rpmflags & RPM_ASYNC) {
0849 dev->power.request = RPM_REQ_RESUME;
0850 if (!dev->power.request_pending) {
0851 dev->power.request_pending = true;
0852 queue_work(pm_wq, &dev->power.work);
0853 }
0854 retval = 0;
0855 goto out;
0856 }
0857
0858 if (!parent && dev->parent) {
/*
 * Increment the parent's usage counter and resume it if
 * necessary.  Not needed if dev is irq-safe; then the
 * parent is permanently resumed.
 */
0864 parent = dev->parent;
0865 if (dev->power.irq_safe)
0866 goto skip_parent;
0867 spin_unlock(&dev->power.lock);
0868
0869 pm_runtime_get_noresume(parent);
0870
0871 spin_lock(&parent->power.lock);
0872
/*
 * Resume the parent if it has runtime PM enabled and has not been
 * set to ignore its children.
 */
0876 if (!parent->power.disable_depth
0877 && !parent->power.ignore_children) {
0878 rpm_resume(parent, 0);
0879 if (parent->power.runtime_status != RPM_ACTIVE)
0880 retval = -EBUSY;
0881 }
0882 spin_unlock(&parent->power.lock);
0883
0884 spin_lock(&dev->power.lock);
0885 if (retval)
0886 goto out;
0887 goto repeat;
0888 }
0889 skip_parent:
0890
0891 if (dev->power.no_callbacks)
0892 goto no_callback;
0893
0894 __update_runtime_status(dev, RPM_RESUMING);
0895
0896 callback = RPM_GET_CALLBACK(dev, runtime_resume);
0897
0898 dev_pm_disable_wake_irq_check(dev, false);
0899 retval = rpm_callback(callback, dev);
0900 if (retval) {
0901 __update_runtime_status(dev, RPM_SUSPENDED);
0902 pm_runtime_cancel_pending(dev);
0903 dev_pm_enable_wake_irq_check(dev, false);
0904 } else {
0905 no_callback:
0906 __update_runtime_status(dev, RPM_ACTIVE);
0907 pm_runtime_mark_last_busy(dev);
0908 if (parent)
0909 atomic_inc(&parent->power.child_count);
0910 }
0911 wake_up_all(&dev->power.wait_queue);
0912
0913 if (retval >= 0)
0914 rpm_idle(dev, RPM_ASYNC);
0915
0916 out:
0917 if (parent && !dev->power.irq_safe) {
0918 spin_unlock_irq(&dev->power.lock);
0919
0920 pm_runtime_put(parent);
0921
0922 spin_lock_irq(&dev->power.lock);
0923 }
0924
0925 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
0926
0927 return retval;
0928 }
0929
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
0937 static void pm_runtime_work(struct work_struct *work)
0938 {
0939 struct device *dev = container_of(work, struct device, power.work);
0940 enum rpm_request req;
0941
0942 spin_lock_irq(&dev->power.lock);
0943
0944 if (!dev->power.request_pending)
0945 goto out;
0946
0947 req = dev->power.request;
0948 dev->power.request = RPM_REQ_NONE;
0949 dev->power.request_pending = false;
0950
0951 switch (req) {
0952 case RPM_REQ_NONE:
0953 break;
0954 case RPM_REQ_IDLE:
0955 rpm_idle(dev, RPM_NOWAIT);
0956 break;
0957 case RPM_REQ_SUSPEND:
0958 rpm_suspend(dev, RPM_NOWAIT);
0959 break;
0960 case RPM_REQ_AUTOSUSPEND:
0961 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
0962 break;
0963 case RPM_REQ_RESUME:
0964 rpm_resume(dev, RPM_NOWAIT);
0965 break;
0966 }
0967
0968 out:
0969 spin_unlock_irq(&dev->power.lock);
0970 }
0971
/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @timer: hrtimer used by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
0978 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
0979 {
0980 struct device *dev = container_of(timer, struct device, power.suspend_timer);
0981 unsigned long flags;
0982 u64 expires;
0983
0984 spin_lock_irqsave(&dev->power.lock, flags);
0985
0986 expires = dev->power.timer_expires;
/*
 * If 'expires' is after the current time, we've been called
 * too early.
 */
0991 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
0992 dev->power.timer_expires = 0;
0993 rpm_suspend(dev, dev->power.timer_autosuspends ?
0994 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
0995 }
0996
0997 spin_unlock_irqrestore(&dev->power.lock, flags);
0998
0999 return HRTIMER_NORESTART;
1000 }
1001
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
1007 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1008 {
1009 unsigned long flags;
1010 u64 expires;
1011 int retval;
1012
1013 spin_lock_irqsave(&dev->power.lock, flags);
1014
1015 if (!delay) {
1016 retval = rpm_suspend(dev, RPM_ASYNC);
1017 goto out;
1018 }
1019
1020 retval = rpm_check_suspend_allowed(dev);
1021 if (retval)
1022 goto out;
1023
/* Other scheduled or pending requests need to be canceled. */
1025 pm_runtime_cancel_pending(dev);
1026
1027 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1028 dev->power.timer_expires = expires;
1029 dev->power.timer_autosuspends = 0;
1030 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1031
1032 out:
1033 spin_unlock_irqrestore(&dev->power.lock, flags);
1034
1035 return retval;
1036 }
1037 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
1038
1039 static int rpm_drop_usage_count(struct device *dev)
1040 {
1041 int ret;
1042
1043 ret = atomic_sub_return(1, &dev->power.usage_count);
1044 if (ret >= 0)
1045 return ret;
1046
/*
 * Because rpm_resume() does not check the usage counter, it will resume
 * the device even if the usage counter is 0 or negative, so it is
 * sufficient to increment the usage counter here to reverse the change
 * made above.
 */
1053 atomic_inc(&dev->power.usage_count);
1054 dev_warn(dev, "Runtime PM usage count underflow!\n");
1055 return -EINVAL;
1056 }
1057
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
1071 int __pm_runtime_idle(struct device *dev, int rpmflags)
1072 {
1073 unsigned long flags;
1074 int retval;
1075
1076 if (rpmflags & RPM_GET_PUT) {
1077 retval = rpm_drop_usage_count(dev);
1078 if (retval < 0) {
1079 return retval;
1080 } else if (retval > 0) {
1081 trace_rpm_usage_rcuidle(dev, rpmflags);
1082 return 0;
1083 }
1084 }
1085
1086 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1087
1088 spin_lock_irqsave(&dev->power.lock, flags);
1089 retval = rpm_idle(dev, rpmflags);
1090 spin_unlock_irqrestore(&dev->power.lock, flags);
1091
1092 return retval;
1093 }
1094 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1095
/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero (if it becomes negative, log a
 * warning, increment it, and return an error).  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
1109 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1110 {
1111 unsigned long flags;
1112 int retval;
1113
1114 if (rpmflags & RPM_GET_PUT) {
1115 retval = rpm_drop_usage_count(dev);
1116 if (retval < 0) {
1117 return retval;
1118 } else if (retval > 0) {
1119 trace_rpm_usage_rcuidle(dev, rpmflags);
1120 return 0;
1121 }
1122 }
1123
1124 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1125
1126 spin_lock_irqsave(&dev->power.lock, flags);
1127 retval = rpm_suspend(dev, rpmflags);
1128 spin_unlock_irqrestore(&dev->power.lock, flags);
1129
1130 return retval;
1131 }
1132 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1133
/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
1145 int __pm_runtime_resume(struct device *dev, int rpmflags)
1146 {
1147 unsigned long flags;
1148 int retval;
1149
1150 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1151 dev->power.runtime_status != RPM_ACTIVE);
1152
1153 if (rpmflags & RPM_GET_PUT)
1154 atomic_inc(&dev->power.usage_count);
1155
1156 spin_lock_irqsave(&dev->power.lock, flags);
1157 retval = rpm_resume(dev, rpmflags);
1158 spin_unlock_irqrestore(&dev->power.lock, flags);
1159
1160 return retval;
1161 }
1162 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
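/*
 * Illustrative sketch (not part of this file): the wrappers in
 * include/linux/pm_runtime.h funnel into the three entry points above.
 * A hypothetical driver context "foo" would typically balance them like this:
 *
 *	static int foo_transfer(struct foo *foo)
 *	{
 *		int ret = pm_runtime_get_sync(foo->dev);  // resume + usage get
 *
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(foo->dev);  // drop the reference taken above
 *			return ret;
 *		}
 *		// ... access the hardware while it is guaranteed RPM_ACTIVE ...
 *		pm_runtime_put(foo->dev);                 // usage put + async idle
 *		return 0;
 *	}
 */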
1163
/**
 * pm_runtime_get_if_active - Conditionally bump up device usage counter.
 * @dev: Device to handle.
 * @ign_usage_count: Whether or not to look at the current usage counter value.
 *
 * Return -EINVAL if runtime PM is disabled for @dev.
 *
 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
 * zero, increment the usage counter of @dev and return 1.  Otherwise, return 0
 * without changing the usage counter.
 *
 * If @ign_usage_count is %true, this function can be used to prevent
 * suspending the device when its runtime PM status is %RPM_ACTIVE.
 *
 * If @ign_usage_count is %false, this function can be used to prevent
 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
 * usage counter is not zero.
 *
 * The caller is responsible for decrementing the usage counter of @dev after
 * this function has returned a positive value for it.
 */
1186 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1187 {
1188 unsigned long flags;
1189 int retval;
1190
1191 spin_lock_irqsave(&dev->power.lock, flags);
1192 if (dev->power.disable_depth > 0) {
1193 retval = -EINVAL;
1194 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1195 retval = 0;
1196 } else if (ign_usage_count) {
1197 retval = 1;
1198 atomic_inc(&dev->power.usage_count);
1199 } else {
1200 retval = atomic_inc_not_zero(&dev->power.usage_count);
1201 }
1202 trace_rpm_usage_rcuidle(dev, 0);
1203 spin_unlock_irqrestore(&dev->power.lock, flags);
1204
1205 return retval;
1206 }
1207 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
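/*
 * Illustrative sketch (not part of this file): pm_runtime_get_if_in_use()
 * (the !ign_usage_count flavour provided by pm_runtime.h) is useful in fast
 * paths that must not wake a suspended device.  "foo" is a hypothetical
 * driver context:
 *
 *	if (pm_runtime_get_if_in_use(foo->dev) <= 0)
 *		return;		// suspended or PM disabled: nothing to do
 *	// ... touch registers; the device stays RPM_ACTIVE meanwhile ...
 *	pm_runtime_put(foo->dev);
 */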
1208
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field of the device and returns 0.
 *
 * The runtime PM references held on behalf of the device by its suppliers are
 * adjusted as well: the suppliers are resumed and reference counted before the
 * status is set to RPM_ACTIVE, and the references taken on behalf of the
 * device are dropped when the status is set to RPM_SUSPENDED.
 *
 * Runtime PM of the device is temporarily disabled while the status is being
 * changed and re-enabled via pm_runtime_enable() before returning.
 */
1233 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1234 {
1235 struct device *parent = dev->parent;
1236 bool notify_parent = false;
1237 unsigned long flags;
1238 int error = 0;
1239
1240 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1241 return -EINVAL;
1242
1243 spin_lock_irqsave(&dev->power.lock, flags);
1244
/*
 * Prevent PM-runtime from being enabled for the device or return an
 * error if it is enabled already and working.
 */
1249 if (dev->power.runtime_error || dev->power.disable_depth)
1250 dev->power.disable_depth++;
1251 else
1252 error = -EAGAIN;
1253
1254 spin_unlock_irqrestore(&dev->power.lock, flags);
1255
1256 if (error)
1257 return error;
1258
/*
 * If the new status is RPM_ACTIVE, the suppliers can be activated
 * upfront regardless of the current status, because next time
 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
 * involved will be dropped down to one anyway.
 */
1265 if (status == RPM_ACTIVE) {
1266 int idx = device_links_read_lock();
1267
1268 error = rpm_get_suppliers(dev);
1269 if (error)
1270 status = RPM_SUSPENDED;
1271
1272 device_links_read_unlock(idx);
1273 }
1274
1275 spin_lock_irqsave(&dev->power.lock, flags);
1276
1277 if (dev->power.runtime_status == status || !parent)
1278 goto out_set;
1279
1280 if (status == RPM_SUSPENDED) {
1281 atomic_add_unless(&parent->power.child_count, -1, 0);
1282 notify_parent = !parent->power.ignore_children;
1283 } else {
1284 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
 * It is invalid to put an active child under a parent that is
 * not active, has runtime PM enabled and the
 * 'power.ignore_children' flag unset.
 */
1291 if (!parent->power.disable_depth
1292 && !parent->power.ignore_children
1293 && parent->power.runtime_status != RPM_ACTIVE) {
1294 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1295 dev_name(dev),
1296 dev_name(parent));
1297 error = -EBUSY;
1298 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1299 atomic_inc(&parent->power.child_count);
1300 }
1301
1302 spin_unlock(&parent->power.lock);
1303
1304 if (error) {
1305 status = RPM_SUSPENDED;
1306 goto out;
1307 }
1308 }
1309
1310 out_set:
1311 __update_runtime_status(dev, status);
1312 if (!error)
1313 dev->power.runtime_error = 0;
1314
1315 out:
1316 spin_unlock_irqrestore(&dev->power.lock, flags);
1317
1318 if (notify_parent)
1319 pm_request_idle(parent);
1320
1321 if (status == RPM_SUSPENDED) {
1322 int idx = device_links_read_lock();
1323
1324 rpm_put_suppliers(dev);
1325
1326 device_links_read_unlock(idx);
1327 }
1328
1329 pm_runtime_enable(dev);
1330
1331 return error;
1332 }
1333 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1334
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
1344 static void __pm_runtime_barrier(struct device *dev)
1345 {
1346 pm_runtime_deactivate_timer(dev);
1347
1348 if (dev->power.request_pending) {
1349 dev->power.request = RPM_REQ_NONE;
1350 spin_unlock_irq(&dev->power.lock);
1351
1352 cancel_work_sync(&dev->power.work);
1353
1354 spin_lock_irq(&dev->power.lock);
1355 dev->power.request_pending = false;
1356 }
1357
1358 if (dev->power.runtime_status == RPM_SUSPENDING
1359 || dev->power.runtime_status == RPM_RESUMING
1360 || dev->power.idle_notification) {
1361 DEFINE_WAIT(wait);
1362
/* Suspend, wake-up or idle notification in progress. */
1364 for (;;) {
1365 prepare_to_wait(&dev->power.wait_queue, &wait,
1366 TASK_UNINTERRUPTIBLE);
1367 if (dev->power.runtime_status != RPM_SUSPENDING
1368 && dev->power.runtime_status != RPM_RESUMING
1369 && !dev->power.idle_notification)
1370 break;
1371 spin_unlock_irq(&dev->power.lock);
1372
1373 schedule();
1374
1375 spin_lock_irq(&dev->power.lock);
1376 }
1377 finish_wait(&dev->power.wait_queue, &wait);
1378 }
1379 }
1380
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
1395 int pm_runtime_barrier(struct device *dev)
1396 {
1397 int retval = 0;
1398
1399 pm_runtime_get_noresume(dev);
1400 spin_lock_irq(&dev->power.lock);
1401
1402 if (dev->power.request_pending
1403 && dev->power.request == RPM_REQ_RESUME) {
1404 rpm_resume(dev, 0);
1405 retval = 1;
1406 }
1407
1408 __pm_runtime_barrier(dev);
1409
1410 spin_unlock_irq(&dev->power.lock);
1411 pm_runtime_put_noidle(dev);
1412
1413 return retval;
1414 }
1415 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1416
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
1431 void __pm_runtime_disable(struct device *dev, bool check_resume)
1432 {
1433 spin_lock_irq(&dev->power.lock);
1434
1435 if (dev->power.disable_depth > 0) {
1436 dev->power.disable_depth++;
1437 goto out;
1438 }
1439
/*
 * Wake up the device if there's a resume request pending, because that
 * means there probably is some I/O to process and disabling runtime PM
 * shouldn't prevent the device from processing the I/O.
 */
1445 if (check_resume && dev->power.request_pending
1446 && dev->power.request == RPM_REQ_RESUME) {
/*
 * Prevent suspends and idle notifications from being carried
 * out after we have woken up the device.
 */
1451 pm_runtime_get_noresume(dev);
1452
1453 rpm_resume(dev, 0);
1454
1455 pm_runtime_put_noidle(dev);
1456 }
1457
/* Update time accounting before disabling PM-runtime. */
1459 update_pm_runtime_accounting(dev);
1460
1461 if (!dev->power.disable_depth++) {
1462 __pm_runtime_barrier(dev);
1463 dev->power.last_status = dev->power.runtime_status;
1464 }
1465
1466 out:
1467 spin_unlock_irq(&dev->power.lock);
1468 }
1469 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1470
/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
1475 void pm_runtime_enable(struct device *dev)
1476 {
1477 unsigned long flags;
1478
1479 spin_lock_irqsave(&dev->power.lock, flags);
1480
1481 if (!dev->power.disable_depth) {
1482 dev_warn(dev, "Unbalanced %s!\n", __func__);
1483 goto out;
1484 }
1485
1486 if (--dev->power.disable_depth > 0)
1487 goto out;
1488
1489 dev->power.last_status = RPM_INVALID;
1490 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1491
1492 if (dev->power.runtime_status == RPM_SUSPENDED &&
1493 !dev->power.ignore_children &&
1494 atomic_read(&dev->power.child_count) > 0)
1495 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1496
1497 out:
1498 spin_unlock_irqrestore(&dev->power.lock, flags);
1499 }
1500 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1501
1502 static void pm_runtime_disable_action(void *data)
1503 {
1504 pm_runtime_dont_use_autosuspend(data);
1505 pm_runtime_disable(data);
1506 }
1507
/**
 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
 *
 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
 * you at driver exit time if needed.
 *
 * @dev: Device to handle.
 */
1516 int devm_pm_runtime_enable(struct device *dev)
1517 {
1518 pm_runtime_enable(dev);
1519
1520 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1521 }
1522 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
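/*
 * Illustrative sketch (not part of this file): with the devres variant a
 * hypothetical driver's probe() needs no matching disable call in its error
 * paths or in remove(); the action registered above runs automatically:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		// ... rest of probe; no pm_runtime_disable() needed on failure
 *	}
 */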
1523
/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
1532 void pm_runtime_forbid(struct device *dev)
1533 {
1534 spin_lock_irq(&dev->power.lock);
1535 if (!dev->power.runtime_auto)
1536 goto out;
1537
1538 dev->power.runtime_auto = false;
1539 atomic_inc(&dev->power.usage_count);
1540 rpm_resume(dev, 0);
1541
1542 out:
1543 spin_unlock_irq(&dev->power.lock);
1544 }
1545 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1546
/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
1553 void pm_runtime_allow(struct device *dev)
1554 {
1555 int ret;
1556
1557 spin_lock_irq(&dev->power.lock);
1558 if (dev->power.runtime_auto)
1559 goto out;
1560
1561 dev->power.runtime_auto = true;
1562 ret = rpm_drop_usage_count(dev);
1563 if (ret == 0)
1564 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1565 else if (ret > 0)
1566 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1567
1568 out:
1569 spin_unlock_irq(&dev->power.lock);
1570 }
1571 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1572
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
1581 void pm_runtime_no_callbacks(struct device *dev)
1582 {
1583 spin_lock_irq(&dev->power.lock);
1584 dev->power.no_callbacks = 1;
1585 spin_unlock_irq(&dev->power.lock);
1586 if (device_is_registered(dev))
1587 rpm_sysfs_remove(dev);
1588 }
1589 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1590
/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
1602 void pm_runtime_irq_safe(struct device *dev)
1603 {
1604 if (dev->parent)
1605 pm_runtime_get_sync(dev->parent);
1606 spin_lock_irq(&dev->power.lock);
1607 dev->power.irq_safe = 1;
1608 spin_unlock_irq(&dev->power.lock);
1609 }
1610 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1611
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
1623 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1624 {
1625 int delay = dev->power.autosuspend_delay;
1626
/* Should runtime suspend be prevented now? */
1628 if (dev->power.use_autosuspend && delay < 0) {
1629
/* If it used to be allowed then prevent it. */
1631 if (!old_use || old_delay >= 0) {
1632 atomic_inc(&dev->power.usage_count);
1633 rpm_resume(dev, 0);
1634 } else {
1635 trace_rpm_usage_rcuidle(dev, 0);
1636 }
1637 }
1638
/* Runtime suspend should be allowed now. */
1640 else {
1641
/* If it used to be prevented then allow it. */
1643 if (old_use && old_delay < 0)
1644 atomic_dec(&dev->power.usage_count);
1645
/* Maybe we can autosuspend now. */
1647 rpm_idle(dev, RPM_AUTO);
1648 }
1649 }
1650
/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
1660 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1661 {
1662 int old_delay, old_use;
1663
1664 spin_lock_irq(&dev->power.lock);
1665 old_delay = dev->power.autosuspend_delay;
1666 old_use = dev->power.use_autosuspend;
1667 dev->power.autosuspend_delay = delay;
1668 update_autosuspend(dev, old_delay, old_use);
1669 spin_unlock_irq(&dev->power.lock);
1670 }
1671 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1672
/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
1681 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1682 {
1683 int old_delay, old_use;
1684
1685 spin_lock_irq(&dev->power.lock);
1686 old_delay = dev->power.autosuspend_delay;
1687 old_use = dev->power.use_autosuspend;
1688 dev->power.use_autosuspend = use;
1689 update_autosuspend(dev, old_delay, old_use);
1690 spin_unlock_irq(&dev->power.lock);
1691 }
1692 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
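/*
 * Illustrative sketch (not part of this file): the usual probe-time sequence
 * for enabling autosuspend on a hypothetical device with a 2-second idle
 * window:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	// milliseconds
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * The matching teardown calls pm_runtime_dont_use_autosuspend() together with
 * pm_runtime_disable(), as pm_runtime_disable_action() above does.
 */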
1693
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
1698 void pm_runtime_init(struct device *dev)
1699 {
1700 dev->power.runtime_status = RPM_SUSPENDED;
1701 dev->power.last_status = RPM_INVALID;
1702 dev->power.idle_notification = false;
1703
1704 dev->power.disable_depth = 1;
1705 atomic_set(&dev->power.usage_count, 0);
1706
1707 dev->power.runtime_error = 0;
1708
1709 atomic_set(&dev->power.child_count, 0);
1710 pm_suspend_ignore_children(dev, false);
1711 dev->power.runtime_auto = true;
1712
1713 dev->power.request_pending = false;
1714 dev->power.request = RPM_REQ_NONE;
1715 dev->power.deferred_resume = false;
1716 dev->power.needs_force_resume = 0;
1717 INIT_WORK(&dev->power.work, pm_runtime_work);
1718
1719 dev->power.timer_expires = 0;
1720 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1721 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1722
1723 init_waitqueue_head(&dev->power.wait_queue);
1724 }
1725
/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
1730 void pm_runtime_reinit(struct device *dev)
1731 {
1732 if (!pm_runtime_enabled(dev)) {
1733 if (dev->power.runtime_status == RPM_ACTIVE)
1734 pm_runtime_set_suspended(dev);
1735 if (dev->power.irq_safe) {
1736 spin_lock_irq(&dev->power.lock);
1737 dev->power.irq_safe = 0;
1738 spin_unlock_irq(&dev->power.lock);
1739 if (dev->parent)
1740 pm_runtime_put(dev->parent);
1741 }
1742 }
1743 }
1744
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
1749 void pm_runtime_remove(struct device *dev)
1750 {
1751 __pm_runtime_disable(dev, false);
1752 pm_runtime_reinit(dev);
1753 }
1754
/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
1759 void pm_runtime_get_suppliers(struct device *dev)
1760 {
1761 struct device_link *link;
1762 int idx;
1763
1764 idx = device_links_read_lock();
1765
1766 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1767 device_links_read_lock_held())
1768 if (link->flags & DL_FLAG_PM_RUNTIME) {
1769 link->supplier_preactivated = true;
1770 pm_runtime_get_sync(link->supplier);
1771 }
1772
1773 device_links_read_unlock(idx);
1774 }
1775
/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
1780 void pm_runtime_put_suppliers(struct device *dev)
1781 {
1782 struct device_link *link;
1783 int idx;
1784
1785 idx = device_links_read_lock();
1786
1787 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1788 device_links_read_lock_held())
1789 if (link->supplier_preactivated) {
1790 link->supplier_preactivated = false;
1791 pm_runtime_put(link->supplier);
1792 }
1793
1794 device_links_read_unlock(idx);
1795 }
1796
1797 void pm_runtime_new_link(struct device *dev)
1798 {
1799 spin_lock_irq(&dev->power.lock);
1800 dev->power.links_count++;
1801 spin_unlock_irq(&dev->power.lock);
1802 }
1803
1804 static void pm_runtime_drop_link_count(struct device *dev)
1805 {
1806 spin_lock_irq(&dev->power.lock);
1807 WARN_ON(dev->power.links_count == 0);
1808 dev->power.links_count--;
1809 spin_unlock_irq(&dev->power.lock);
1810 }
1811
/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * PM runtime references to it from the consumer.
 */
1820 void pm_runtime_drop_link(struct device_link *link)
1821 {
1822 if (!(link->flags & DL_FLAG_PM_RUNTIME))
1823 return;
1824
1825 pm_runtime_drop_link_count(link->consumer);
1826 pm_runtime_release_supplier(link);
1827 pm_request_idle(link->supplier);
1828 }
1829
1830 static bool pm_runtime_need_not_resume(struct device *dev)
1831 {
1832 return atomic_read(&dev->power.usage_count) <= 1 &&
1833 (atomic_read(&dev->power.child_count) == 0 ||
1834 dev->power.ignore_children);
1835 }
1836
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM
 * status and if it is active, invoke its ->runtime_suspend callback to suspend
 * it and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
 * device's runtime PM status is RPM_SUSPENDED and the device can be left in
 * that state, don't resume it in pm_runtime_force_resume().
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into a low power state, and it should only be
 * used during system-wide PM transitions to sleep states.  It assumes that
 * the analogous pm_runtime_force_resume() will be used to resume the device.
 */
1854 int pm_runtime_force_suspend(struct device *dev)
1855 {
1856 int (*callback)(struct device *);
1857 int ret;
1858
1859 pm_runtime_disable(dev);
1860 if (pm_runtime_status_suspended(dev))
1861 return 0;
1862
1863 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1864
1865 dev_pm_enable_wake_irq_check(dev, true);
1866 ret = callback ? callback(dev) : 0;
1867 if (ret)
1868 goto err;
1869
1870 dev_pm_enable_wake_irq_complete(dev);
1871
/*
 * If the device can stay in suspend after the system-wide transition
 * to the working state that will follow, drop the children counter of
 * its parent, but set its status to RPM_SUSPENDED anyway in case this
 * function will be called again for it in the meantime.
 */
1878 if (pm_runtime_need_not_resume(dev)) {
1879 pm_runtime_set_suspended(dev);
1880 } else {
1881 __update_runtime_status(dev, RPM_SUSPENDED);
1882 dev->power.needs_force_resume = 1;
1883 }
1884
1885 return 0;
1886
1887 err:
1888 dev_pm_disable_wake_irq_check(dev, true);
1889 pm_runtime_enable(dev);
1890 return ret;
1891 }
1892 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
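/*
 * Illustrative sketch (not part of this file): drivers that want system-wide
 * suspend/resume to simply mirror their runtime PM callbacks can wire the two
 * helpers into their dev_pm_ops.  foo_runtime_suspend()/foo_runtime_resume()
 * are hypothetical driver callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 */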
1893
/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse that, by resuming the device and setting its runtime PM status
 * to RPM_ACTIVE.  Those actions are only needed in case the device was
 * requested to remain suspended in pm_runtime_force_suspend() and the resume
 * was deferred to this function.
 *
 * Typically this function may be invoked from a system resume callback.
 */
1906 int pm_runtime_force_resume(struct device *dev)
1907 {
1908 int (*callback)(struct device *);
1909 int ret = 0;
1910
1911 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1912 goto out;
1913
/*
 * The value of the parent's children counter is correct already, so
 * just update the status of the device.
 */
1918 __update_runtime_status(dev, RPM_ACTIVE);
1919
1920 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1921
1922 dev_pm_disable_wake_irq_check(dev, false);
1923 ret = callback ? callback(dev) : 0;
1924 if (ret) {
1925 pm_runtime_set_suspended(dev);
1926 dev_pm_enable_wake_irq_check(dev, false);
1927 goto out;
1928 }
1929
1930 pm_runtime_mark_last_busy(dev);
1931 out:
1932 dev->power.needs_force_resume = 0;
1933 pm_runtime_enable(dev);
1934 return ret;
1935 }
1936 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);