0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * drivers/base/power/main.c - Where the driver meets power management.
0004  *
0005  * Copyright (c) 2003 Patrick Mochel
0006  * Copyright (c) 2003 Open Source Development Lab
0007  *
0008  * The driver model core calls device_pm_add() when a device is registered.
0009  * This will initialize the embedded device_pm_info object in the device
0010  * and add it to the list of power-controlled devices. sysfs entries for
0011  * controlling device power management will also be added.
0012  *
0013  * A separate list is used for keeping track of power info, because the power
0014  * domain dependencies may differ from the ancestral dependencies that the
0015  * subsystem list maintains.
0016  */
0017 
0018 #define pr_fmt(fmt) "PM: " fmt
0019 #define dev_fmt pr_fmt
0020 
0021 #include <linux/device.h>
0022 #include <linux/export.h>
0023 #include <linux/mutex.h>
0024 #include <linux/pm.h>
0025 #include <linux/pm_runtime.h>
0026 #include <linux/pm-trace.h>
0027 #include <linux/pm_wakeirq.h>
0028 #include <linux/interrupt.h>
0029 #include <linux/sched.h>
0030 #include <linux/sched/debug.h>
0031 #include <linux/async.h>
0032 #include <linux/suspend.h>
0033 #include <trace/events/power.h>
0034 #include <linux/cpufreq.h>
0035 #include <linux/devfreq.h>
0036 #include <linux/timer.h>
0037 
0038 #include "../base.h"
0039 #include "power.h"
0040 
0041 typedef int (*pm_callback_t)(struct device *);
0042 
0043 #define list_for_each_entry_rcu_locked(pos, head, member) \
0044     list_for_each_entry_rcu(pos, head, member, \
0045             device_links_read_lock_held())
0046 
0047 /*
0048  * The entries in dpm_list are in depth-first order, simply
0049  * because children are guaranteed to be discovered after parents, and
0050  * are inserted at the back of the list on discovery.
0051  *
0052  * Since device_pm_add() may be called with a device lock held,
0053  * we must never try to acquire a device lock while holding
0054  * dpm_list_mutex.
0055  */
0056 
0057 LIST_HEAD(dpm_list);
0058 static LIST_HEAD(dpm_prepared_list);
0059 static LIST_HEAD(dpm_suspended_list);
0060 static LIST_HEAD(dpm_late_early_list);
0061 static LIST_HEAD(dpm_noirq_list);
0062 
0063 struct suspend_stats suspend_stats;
0064 static DEFINE_MUTEX(dpm_list_mtx);
0065 static pm_message_t pm_transition;
0066 
0067 static int async_error;
0068 
0069 static const char *pm_verb(int event)
0070 {
0071     switch (event) {
0072     case PM_EVENT_SUSPEND:
0073         return "suspend";
0074     case PM_EVENT_RESUME:
0075         return "resume";
0076     case PM_EVENT_FREEZE:
0077         return "freeze";
0078     case PM_EVENT_QUIESCE:
0079         return "quiesce";
0080     case PM_EVENT_HIBERNATE:
0081         return "hibernate";
0082     case PM_EVENT_THAW:
0083         return "thaw";
0084     case PM_EVENT_RESTORE:
0085         return "restore";
0086     case PM_EVENT_RECOVER:
0087         return "recover";
0088     default:
0089         return "(unknown PM event)";
0090     }
0091 }
0092 
0093 /**
0094  * device_pm_sleep_init - Initialize system suspend-related device fields.
0095  * @dev: Device object being initialized.
0096  */
0097 void device_pm_sleep_init(struct device *dev)
0098 {
0099     dev->power.is_prepared = false;
0100     dev->power.is_suspended = false;
0101     dev->power.is_noirq_suspended = false;
0102     dev->power.is_late_suspended = false;
0103     init_completion(&dev->power.completion);
0104     complete_all(&dev->power.completion);
0105     dev->power.wakeup = NULL;
0106     INIT_LIST_HEAD(&dev->power.entry);
0107 }
0108 
0109 /**
0110  * device_pm_lock - Lock the list of active devices used by the PM core.
0111  */
0112 void device_pm_lock(void)
0113 {
0114     mutex_lock(&dpm_list_mtx);
0115 }
0116 
0117 /**
0118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
0119  */
0120 void device_pm_unlock(void)
0121 {
0122     mutex_unlock(&dpm_list_mtx);
0123 }
0124 
0125 /**
0126  * device_pm_add - Add a device to the PM core's list of active devices.
0127  * @dev: Device to add to the list.
0128  */
0129 void device_pm_add(struct device *dev)
0130 {
0131     /* Skip PM setup/initialization. */
0132     if (device_pm_not_required(dev))
0133         return;
0134 
0135     pr_debug("Adding info for %s:%s\n",
0136          dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
0137     device_pm_check_callbacks(dev);
0138     mutex_lock(&dpm_list_mtx);
0139     if (dev->parent && dev->parent->power.is_prepared)
0140         dev_warn(dev, "parent %s should not be sleeping\n",
0141             dev_name(dev->parent));
0142     list_add_tail(&dev->power.entry, &dpm_list);
0143     dev->power.in_dpm_list = true;
0144     mutex_unlock(&dpm_list_mtx);
0145 }
0146 
0147 /**
0148  * device_pm_remove - Remove a device from the PM core's list of active devices.
0149  * @dev: Device to be removed from the list.
0150  */
0151 void device_pm_remove(struct device *dev)
0152 {
0153     if (device_pm_not_required(dev))
0154         return;
0155 
0156     pr_debug("Removing info for %s:%s\n",
0157          dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
0158     complete_all(&dev->power.completion);
0159     mutex_lock(&dpm_list_mtx);
0160     list_del_init(&dev->power.entry);
0161     dev->power.in_dpm_list = false;
0162     mutex_unlock(&dpm_list_mtx);
0163     device_wakeup_disable(dev);
0164     pm_runtime_remove(dev);
0165     device_pm_check_callbacks(dev);
0166 }
0167 
0168 /**
0169  * device_pm_move_before - Move device in the PM core's list of active devices.
0170  * @deva: Device to move in dpm_list.
0171  * @devb: Device @deva should come before.
0172  */
0173 void device_pm_move_before(struct device *deva, struct device *devb)
0174 {
0175     pr_debug("Moving %s:%s before %s:%s\n",
0176          deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
0177          devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
0178     /* Delete deva from dpm_list and reinsert before devb. */
0179     list_move_tail(&deva->power.entry, &devb->power.entry);
0180 }
0181 
0182 /**
0183  * device_pm_move_after - Move device in the PM core's list of active devices.
0184  * @deva: Device to move in dpm_list.
0185  * @devb: Device @deva should come after.
0186  */
0187 void device_pm_move_after(struct device *deva, struct device *devb)
0188 {
0189     pr_debug("Moving %s:%s after %s:%s\n",
0190          deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
0191          devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
0192     /* Delete deva from dpm_list and reinsert after devb. */
0193     list_move(&deva->power.entry, &devb->power.entry);
0194 }
0195 
0196 /**
0197  * device_pm_move_last - Move device to end of the PM core's list of devices.
0198  * @dev: Device to move in dpm_list.
0199  */
0200 void device_pm_move_last(struct device *dev)
0201 {
0202     pr_debug("Moving %s:%s to end of list\n",
0203          dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
0204     list_move_tail(&dev->power.entry, &dpm_list);
0205 }
0206 
0207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
0208 {
0209     if (!pm_print_times_enabled)
0210         return 0;
0211 
0212     dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
0213          task_pid_nr(current),
0214          dev->parent ? dev_name(dev->parent) : "none");
0215     return ktime_get();
0216 }
0217 
0218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
0219                   void *cb, int error)
0220 {
0221     ktime_t rettime;
0222 
0223     if (!pm_print_times_enabled)
0224         return;
0225 
0226     rettime = ktime_get();
0227     dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
0228          (unsigned long long)ktime_us_delta(rettime, calltime));
0229 }
0230 
0231 /**
0232  * dpm_wait - Wait for a PM operation to complete.
0233  * @dev: Device to wait for.
0234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
0235  */
0236 static void dpm_wait(struct device *dev, bool async)
0237 {
0238     if (!dev)
0239         return;
0240 
0241     if (async || (pm_async_enabled && dev->power.async_suspend))
0242         wait_for_completion(&dev->power.completion);
0243 }
0244 
0245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
0246 {
0247     dpm_wait(dev, *((bool *)async_ptr));
0248     return 0;
0249 }
0250 
0251 static void dpm_wait_for_children(struct device *dev, bool async)
0252 {
0253     device_for_each_child(dev, &async, dpm_wait_fn);
0254 }
0255 
0256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
0257 {
0258     struct device_link *link;
0259     int idx;
0260 
0261     idx = device_links_read_lock();
0262 
0263     /*
0264      * If the supplier goes away right after we've checked the link to it,
0265      * we'll wait for its completion to change the state, but that's fine,
0266      * because the only things that will block as a result are the SRCU
0267      * callbacks freeing the link objects for the links in the list we're
0268      * walking.
0269      */
0270     list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
0271         if (READ_ONCE(link->status) != DL_STATE_DORMANT)
0272             dpm_wait(link->supplier, async);
0273 
0274     device_links_read_unlock(idx);
0275 }
0276 
0277 static bool dpm_wait_for_superior(struct device *dev, bool async)
0278 {
0279     struct device *parent;
0280 
0281     /*
0282      * If the device is resumed asynchronously and the parent's callback
0283      * deletes both the device and the parent itself, the parent object may
0284      * be freed while this function is running, so avoid that by reference
0285      * counting the parent once more unless the device has been deleted
0286      * already (in which case return right away).
0287      */
0288     mutex_lock(&dpm_list_mtx);
0289 
0290     if (!device_pm_initialized(dev)) {
0291         mutex_unlock(&dpm_list_mtx);
0292         return false;
0293     }
0294 
0295     parent = get_device(dev->parent);
0296 
0297     mutex_unlock(&dpm_list_mtx);
0298 
0299     dpm_wait(parent, async);
0300     put_device(parent);
0301 
0302     dpm_wait_for_suppliers(dev, async);
0303 
0304     /*
0305      * If the parent's callback has deleted the device, attempting to resume
0306      * it would be invalid, so avoid doing that then.
0307      */
0308     return device_pm_initialized(dev);
0309 }
0310 
0311 static void dpm_wait_for_consumers(struct device *dev, bool async)
0312 {
0313     struct device_link *link;
0314     int idx;
0315 
0316     idx = device_links_read_lock();
0317 
0318     /*
0319      * The status of a device link can only be changed from "dormant" by a
0320      * probe, but that cannot happen during system suspend/resume.  In
0321      * theory it can change to "dormant" at that time, but then it is
0322      * reasonable to wait for the target device anyway (eg. if it goes
0323      * away, it's better to wait for it to go away completely and then
0324      * continue instead of trying to continue in parallel with its
0325      * unregistration).
0326      */
0327     list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
0328         if (READ_ONCE(link->status) != DL_STATE_DORMANT)
0329             dpm_wait(link->consumer, async);
0330 
0331     device_links_read_unlock(idx);
0332 }
0333 
0334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
0335 {
0336     dpm_wait_for_children(dev, async);
0337     dpm_wait_for_consumers(dev, async);
0338 }
0339 
0340 /**
0341  * pm_op - Return the PM operation appropriate for given PM event.
0342  * @ops: PM operations to choose from.
0343  * @state: PM transition of the system being carried out.
0344  */
0345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
0346 {
0347     switch (state.event) {
0348 #ifdef CONFIG_SUSPEND
0349     case PM_EVENT_SUSPEND:
0350         return ops->suspend;
0351     case PM_EVENT_RESUME:
0352         return ops->resume;
0353 #endif /* CONFIG_SUSPEND */
0354 #ifdef CONFIG_HIBERNATE_CALLBACKS
0355     case PM_EVENT_FREEZE:
0356     case PM_EVENT_QUIESCE:
0357         return ops->freeze;
0358     case PM_EVENT_HIBERNATE:
0359         return ops->poweroff;
0360     case PM_EVENT_THAW:
0361     case PM_EVENT_RECOVER:
0362         return ops->thaw;
0363     case PM_EVENT_RESTORE:
0364         return ops->restore;
0365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
0366     }
0367 
0368     return NULL;
0369 }
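/*
 * Illustrative sketch, not part of the original file: the callbacks that
 * pm_op() looks up come from the dev_pm_ops structure a driver registers,
 * typically filled in with the SET_SYSTEM_SLEEP_PM_OPS() helper from
 * <linux/pm.h>.  The names foo_suspend(), foo_resume() and foo_pm_ops are
 * hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		... quiesce the hardware and save context ...
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		... restore context and reactivate the hardware ...
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With that, pm_op() returns foo_suspend for PM_EVENT_SUSPEND and foo_resume
 * for PM_EVENT_RESUME; since the macro reuses the same functions for the
 * hibernation callbacks, the freeze/thaw/poweroff/restore events resolve to
 * them as well.
 */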
0370 
0371 /**
0372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
0373  * @ops: PM operations to choose from.
0374  * @state: PM transition of the system being carried out.
0375  *
0376  * Runtime PM is disabled for the target device while the returned callback runs.
0377  */
0378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
0379                       pm_message_t state)
0380 {
0381     switch (state.event) {
0382 #ifdef CONFIG_SUSPEND
0383     case PM_EVENT_SUSPEND:
0384         return ops->suspend_late;
0385     case PM_EVENT_RESUME:
0386         return ops->resume_early;
0387 #endif /* CONFIG_SUSPEND */
0388 #ifdef CONFIG_HIBERNATE_CALLBACKS
0389     case PM_EVENT_FREEZE:
0390     case PM_EVENT_QUIESCE:
0391         return ops->freeze_late;
0392     case PM_EVENT_HIBERNATE:
0393         return ops->poweroff_late;
0394     case PM_EVENT_THAW:
0395     case PM_EVENT_RECOVER:
0396         return ops->thaw_early;
0397     case PM_EVENT_RESTORE:
0398         return ops->restore_early;
0399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
0400     }
0401 
0402     return NULL;
0403 }
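/*
 * Illustrative sketch, not part of the original file: the late/early
 * callbacks looked up here are the dev_pm_ops fields a driver can populate
 * with SET_LATE_SYSTEM_SLEEP_PM_OPS() from <linux/pm.h>; foo_suspend_late()
 * and foo_resume_early() are hypothetical names:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *	};
 */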
0404 
0405 /**
0406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
0407  * @ops: PM operations to choose from.
0408  * @state: PM transition of the system being carried out.
0409  *
0410  * The driver of the target device will not receive interrupts while the
0411  * returned callback is being executed.
0412  */
0413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
0414 {
0415     switch (state.event) {
0416 #ifdef CONFIG_SUSPEND
0417     case PM_EVENT_SUSPEND:
0418         return ops->suspend_noirq;
0419     case PM_EVENT_RESUME:
0420         return ops->resume_noirq;
0421 #endif /* CONFIG_SUSPEND */
0422 #ifdef CONFIG_HIBERNATE_CALLBACKS
0423     case PM_EVENT_FREEZE:
0424     case PM_EVENT_QUIESCE:
0425         return ops->freeze_noirq;
0426     case PM_EVENT_HIBERNATE:
0427         return ops->poweroff_noirq;
0428     case PM_EVENT_THAW:
0429     case PM_EVENT_RECOVER:
0430         return ops->thaw_noirq;
0431     case PM_EVENT_RESTORE:
0432         return ops->restore_noirq;
0433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
0434     }
0435 
0436     return NULL;
0437 }
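/*
 * Illustrative sketch, not part of the original file: the noirq callbacks
 * looked up here run after suspend_device_irqs() has suspended interrupt
 * delivery to drivers; they can be supplied with
 * SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() from <linux/pm.h> (foo_suspend_noirq() and
 * foo_resume_noirq() are hypothetical names):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 */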
0438 
0439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
0440 {
0441     dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
0442         ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
0443         ", may wakeup" : "", dev->power.driver_flags);
0444 }
0445 
0446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
0447             int error)
0448 {
0449     dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
0450         error);
0451 }
0452 
0453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
0454               const char *info)
0455 {
0456     ktime_t calltime;
0457     u64 usecs64;
0458     int usecs;
0459 
0460     calltime = ktime_get();
0461     usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
0462     do_div(usecs64, NSEC_PER_USEC);
0463     usecs = usecs64;
0464     if (usecs == 0)
0465         usecs = 1;
0466 
0467     pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
0468           info ?: "", info ? " " : "", pm_verb(state.event),
0469           error ? "aborted" : "complete",
0470           usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
0471 }
0472 
0473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
0474                 pm_message_t state, const char *info)
0475 {
0476     ktime_t calltime;
0477     int error;
0478 
0479     if (!cb)
0480         return 0;
0481 
0482     calltime = initcall_debug_start(dev, cb);
0483 
0484     pm_dev_dbg(dev, state, info);
0485     trace_device_pm_callback_start(dev, info, state.event);
0486     error = cb(dev);
0487     trace_device_pm_callback_end(dev, error);
0488     suspend_report_result(dev, cb, error);
0489 
0490     initcall_debug_report(dev, calltime, cb, error);
0491 
0492     return error;
0493 }
0494 
0495 #ifdef CONFIG_DPM_WATCHDOG
0496 struct dpm_watchdog {
0497     struct device       *dev;
0498     struct task_struct  *tsk;
0499     struct timer_list   timer;
0500 };
0501 
0502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
0503     struct dpm_watchdog wd
0504 
0505 /**
0506  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
0507  * @t: The timer that PM watchdog depends on.
0508  *
0509  * Called when a driver has timed out suspending or resuming.
0510  * There's not much we can do here to recover so panic() to
0511  * capture a crash-dump in pstore.
0512  */
0513 static void dpm_watchdog_handler(struct timer_list *t)
0514 {
0515     struct dpm_watchdog *wd = from_timer(wd, t, timer);
0516 
0517     dev_emerg(wd->dev, "**** DPM device timeout ****\n");
0518     show_stack(wd->tsk, NULL, KERN_EMERG);
0519     panic("%s %s: unrecoverable failure\n",
0520         dev_driver_string(wd->dev), dev_name(wd->dev));
0521 }
0522 
0523 /**
0524  * dpm_watchdog_set - Enable pm watchdog for given device.
0525  * @wd: Watchdog. Must be allocated on the stack.
0526  * @dev: Device to handle.
0527  */
0528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
0529 {
0530     struct timer_list *timer = &wd->timer;
0531 
0532     wd->dev = dev;
0533     wd->tsk = current;
0534 
0535     timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
0536     /* use same timeout value for both suspend and resume */
0537     timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
0538     add_timer(timer);
0539 }
0540 
0541 /**
0542  * dpm_watchdog_clear - Disable suspend/resume watchdog.
0543  * @wd: Watchdog to disable.
0544  */
0545 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
0546 {
0547     struct timer_list *timer = &wd->timer;
0548 
0549     del_timer_sync(timer);
0550     destroy_timer_on_stack(timer);
0551 }
0552 #else
0553 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
0554 #define dpm_watchdog_set(x, y)
0555 #define dpm_watchdog_clear(x)
0556 #endif
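/*
 * Note, not part of the original file: the watchdog above is only built when
 * the kernel configuration enables it, e.g. with something like
 * CONFIG_DPM_WATCHDOG=y and CONFIG_DPM_WATCHDOG_TIMEOUT=120 (seconds) in the
 * .config; otherwise the empty stubs above are used.
 */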
0557 
0558 /*------------------------- Resume routines -------------------------*/
0559 
0560 /**
0561  * dev_pm_skip_resume - System-wide device resume optimization check.
0562  * @dev: Target device.
0563  *
0564  * Return:
0565  * - %false if the transition under way is RESTORE.
0566  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
0567  * - The logical negation of %power.must_resume otherwise (that is, when the
0568  *   transition under way is RESUME).
0569  */
0570 bool dev_pm_skip_resume(struct device *dev)
0571 {
0572     if (pm_transition.event == PM_EVENT_RESTORE)
0573         return false;
0574 
0575     if (pm_transition.event == PM_EVENT_THAW)
0576         return dev_pm_skip_suspend(dev);
0577 
0578     return !dev->power.must_resume;
0579 }
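/*
 * Illustrative sketch, not part of the original file: power.must_resume is
 * influenced by the DPM_FLAG_MAY_SKIP_RESUME driver flag checked during the
 * noirq suspend phase; a driver would typically opt in from its probe
 * routine (foo_probe() is a hypothetical name):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		...
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 */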
0580 
0581 /**
0582  * device_resume_noirq - Execute a "noirq resume" callback for given device.
0583  * @dev: Device to handle.
0584  * @state: PM transition of the system being carried out.
0585  * @async: If true, the device is being resumed asynchronously.
0586  *
0587  * The driver of @dev will not receive interrupts while this function is being
0588  * executed.
0589  */
0590 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
0591 {
0592     pm_callback_t callback = NULL;
0593     const char *info = NULL;
0594     bool skip_resume;
0595     int error = 0;
0596 
0597     TRACE_DEVICE(dev);
0598     TRACE_RESUME(0);
0599 
0600     if (dev->power.syscore || dev->power.direct_complete)
0601         goto Out;
0602 
0603     if (!dev->power.is_noirq_suspended)
0604         goto Out;
0605 
0606     if (!dpm_wait_for_superior(dev, async))
0607         goto Out;
0608 
0609     skip_resume = dev_pm_skip_resume(dev);
0610     /*
0611      * If the driver callback is skipped below or by the middle layer
0612      * callback and device_resume_early() also skips the driver callback for
0613      * this device later, it needs to appear as "suspended" to PM-runtime,
0614      * so change its status accordingly.
0615      *
0616      * Otherwise, the device is going to be resumed, so set its PM-runtime
0617      * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
0618      * to avoid confusing drivers that don't use it.
0619      */
0620     if (skip_resume)
0621         pm_runtime_set_suspended(dev);
0622     else if (dev_pm_skip_suspend(dev))
0623         pm_runtime_set_active(dev);
0624 
0625     if (dev->pm_domain) {
0626         info = "noirq power domain ";
0627         callback = pm_noirq_op(&dev->pm_domain->ops, state);
0628     } else if (dev->type && dev->type->pm) {
0629         info = "noirq type ";
0630         callback = pm_noirq_op(dev->type->pm, state);
0631     } else if (dev->class && dev->class->pm) {
0632         info = "noirq class ";
0633         callback = pm_noirq_op(dev->class->pm, state);
0634     } else if (dev->bus && dev->bus->pm) {
0635         info = "noirq bus ";
0636         callback = pm_noirq_op(dev->bus->pm, state);
0637     }
0638     if (callback)
0639         goto Run;
0640 
0641     if (skip_resume)
0642         goto Skip;
0643 
0644     if (dev->driver && dev->driver->pm) {
0645         info = "noirq driver ";
0646         callback = pm_noirq_op(dev->driver->pm, state);
0647     }
0648 
0649 Run:
0650     error = dpm_run_callback(callback, dev, state, info);
0651 
0652 Skip:
0653     dev->power.is_noirq_suspended = false;
0654 
0655 Out:
0656     complete_all(&dev->power.completion);
0657     TRACE_RESUME(error);
0658     return error;
0659 }
0660 
0661 static bool is_async(struct device *dev)
0662 {
0663     return dev->power.async_suspend && pm_async_enabled
0664         && !pm_trace_is_enabled();
0665 }
0666 
0667 static bool dpm_async_fn(struct device *dev, async_func_t func)
0668 {
0669     reinit_completion(&dev->power.completion);
0670 
0671     if (is_async(dev)) {
0672         get_device(dev);
0673         async_schedule_dev(func, dev);
0674         return true;
0675     }
0676 
0677     return false;
0678 }
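/*
 * Illustrative sketch, not part of the original file: a device only takes
 * the asynchronous path above if power.async_suspend is set, which a bus or
 * driver usually does once at registration/probe time:
 *
 *	device_enable_async_suspend(dev);
 *
 * Asynchronous handling can still be switched off globally via
 * /sys/power/pm_async, which is the pm_async_enabled knob tested in
 * is_async().
 */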
0679 
0680 static void async_resume_noirq(void *data, async_cookie_t cookie)
0681 {
0682     struct device *dev = (struct device *)data;
0683     int error;
0684 
0685     error = device_resume_noirq(dev, pm_transition, true);
0686     if (error)
0687         pm_dev_err(dev, pm_transition, " async", error);
0688 
0689     put_device(dev);
0690 }
0691 
0692 static void dpm_noirq_resume_devices(pm_message_t state)
0693 {
0694     struct device *dev;
0695     ktime_t starttime = ktime_get();
0696 
0697     trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
0698     mutex_lock(&dpm_list_mtx);
0699     pm_transition = state;
0700 
0701     /*
0702      * Advance the async threads upfront, in case the start of
0703      * the async threads would otherwise be delayed by devices
0704      * that are resumed synchronously.
0705      */
0706     list_for_each_entry(dev, &dpm_noirq_list, power.entry)
0707         dpm_async_fn(dev, async_resume_noirq);
0708 
0709     while (!list_empty(&dpm_noirq_list)) {
0710         dev = to_device(dpm_noirq_list.next);
0711         get_device(dev);
0712         list_move_tail(&dev->power.entry, &dpm_late_early_list);
0713 
0714         mutex_unlock(&dpm_list_mtx);
0715 
0716         if (!is_async(dev)) {
0717             int error;
0718 
0719             error = device_resume_noirq(dev, state, false);
0720             if (error) {
0721                 suspend_stats.failed_resume_noirq++;
0722                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
0723                 dpm_save_failed_dev(dev_name(dev));
0724                 pm_dev_err(dev, state, " noirq", error);
0725             }
0726         }
0727 
0728         put_device(dev);
0729 
0730         mutex_lock(&dpm_list_mtx);
0731     }
0732     mutex_unlock(&dpm_list_mtx);
0733     async_synchronize_full();
0734     dpm_show_time(starttime, state, 0, "noirq");
0735     trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
0736 }
0737 
0738 /**
0739  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
0740  * @state: PM transition of the system being carried out.
0741  *
0742  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
0743  * allow device drivers' interrupt handlers to be called.
0744  */
0745 void dpm_resume_noirq(pm_message_t state)
0746 {
0747     dpm_noirq_resume_devices(state);
0748 
0749     resume_device_irqs();
0750     device_wakeup_disarm_wake_irqs();
0751 }
0752 
0753 /**
0754  * device_resume_early - Execute an "early resume" callback for given device.
0755  * @dev: Device to handle.
0756  * @state: PM transition of the system being carried out.
0757  * @async: If true, the device is being resumed asynchronously.
0758  *
0759  * Runtime PM is disabled for @dev while this function is being executed.
0760  */
0761 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
0762 {
0763     pm_callback_t callback = NULL;
0764     const char *info = NULL;
0765     int error = 0;
0766 
0767     TRACE_DEVICE(dev);
0768     TRACE_RESUME(0);
0769 
0770     if (dev->power.syscore || dev->power.direct_complete)
0771         goto Out;
0772 
0773     if (!dev->power.is_late_suspended)
0774         goto Out;
0775 
0776     if (!dpm_wait_for_superior(dev, async))
0777         goto Out;
0778 
0779     if (dev->pm_domain) {
0780         info = "early power domain ";
0781         callback = pm_late_early_op(&dev->pm_domain->ops, state);
0782     } else if (dev->type && dev->type->pm) {
0783         info = "early type ";
0784         callback = pm_late_early_op(dev->type->pm, state);
0785     } else if (dev->class && dev->class->pm) {
0786         info = "early class ";
0787         callback = pm_late_early_op(dev->class->pm, state);
0788     } else if (dev->bus && dev->bus->pm) {
0789         info = "early bus ";
0790         callback = pm_late_early_op(dev->bus->pm, state);
0791     }
0792     if (callback)
0793         goto Run;
0794 
0795     if (dev_pm_skip_resume(dev))
0796         goto Skip;
0797 
0798     if (dev->driver && dev->driver->pm) {
0799         info = "early driver ";
0800         callback = pm_late_early_op(dev->driver->pm, state);
0801     }
0802 
0803 Run:
0804     error = dpm_run_callback(callback, dev, state, info);
0805 
0806 Skip:
0807     dev->power.is_late_suspended = false;
0808 
0809 Out:
0810     TRACE_RESUME(error);
0811 
0812     pm_runtime_enable(dev);
0813     complete_all(&dev->power.completion);
0814     return error;
0815 }
0816 
0817 static void async_resume_early(void *data, async_cookie_t cookie)
0818 {
0819     struct device *dev = (struct device *)data;
0820     int error;
0821 
0822     error = device_resume_early(dev, pm_transition, true);
0823     if (error)
0824         pm_dev_err(dev, pm_transition, " async", error);
0825 
0826     put_device(dev);
0827 }
0828 
0829 /**
0830  * dpm_resume_early - Execute "early resume" callbacks for all devices.
0831  * @state: PM transition of the system being carried out.
0832  */
0833 void dpm_resume_early(pm_message_t state)
0834 {
0835     struct device *dev;
0836     ktime_t starttime = ktime_get();
0837 
0838     trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
0839     mutex_lock(&dpm_list_mtx);
0840     pm_transition = state;
0841 
0842     /*
0843      * Advance the async threads upfront, in case the start of
0844      * the async threads would otherwise be delayed by devices
0845      * that are resumed synchronously.
0846      */
0847     list_for_each_entry(dev, &dpm_late_early_list, power.entry)
0848         dpm_async_fn(dev, async_resume_early);
0849 
0850     while (!list_empty(&dpm_late_early_list)) {
0851         dev = to_device(dpm_late_early_list.next);
0852         get_device(dev);
0853         list_move_tail(&dev->power.entry, &dpm_suspended_list);
0854 
0855         mutex_unlock(&dpm_list_mtx);
0856 
0857         if (!is_async(dev)) {
0858             int error;
0859 
0860             error = device_resume_early(dev, state, false);
0861             if (error) {
0862                 suspend_stats.failed_resume_early++;
0863                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
0864                 dpm_save_failed_dev(dev_name(dev));
0865                 pm_dev_err(dev, state, " early", error);
0866             }
0867         }
0868 
0869         put_device(dev);
0870 
0871         mutex_lock(&dpm_list_mtx);
0872     }
0873     mutex_unlock(&dpm_list_mtx);
0874     async_synchronize_full();
0875     dpm_show_time(starttime, state, 0, "early");
0876     trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
0877 }
0878 
0879 /**
0880  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
0881  * @state: PM transition of the system being carried out.
0882  */
0883 void dpm_resume_start(pm_message_t state)
0884 {
0885     dpm_resume_noirq(state);
0886     dpm_resume_early(state);
0887 }
0888 EXPORT_SYMBOL_GPL(dpm_resume_start);
0889 
0890 /**
0891  * device_resume - Execute "resume" callbacks for given device.
0892  * @dev: Device to handle.
0893  * @state: PM transition of the system being carried out.
0894  * @async: If true, the device is being resumed asynchronously.
0895  */
0896 static int device_resume(struct device *dev, pm_message_t state, bool async)
0897 {
0898     pm_callback_t callback = NULL;
0899     const char *info = NULL;
0900     int error = 0;
0901     DECLARE_DPM_WATCHDOG_ON_STACK(wd);
0902 
0903     TRACE_DEVICE(dev);
0904     TRACE_RESUME(0);
0905 
0906     if (dev->power.syscore)
0907         goto Complete;
0908 
0909     if (dev->power.direct_complete) {
0910         /* Match the pm_runtime_disable() in __device_suspend(). */
0911         pm_runtime_enable(dev);
0912         goto Complete;
0913     }
0914 
0915     if (!dpm_wait_for_superior(dev, async))
0916         goto Complete;
0917 
0918     dpm_watchdog_set(&wd, dev);
0919     device_lock(dev);
0920 
0921     /*
0922      * This is a fib.  But we'll allow new children to be added below
0923      * a resumed device, even if the device hasn't been completed yet.
0924      */
0925     dev->power.is_prepared = false;
0926 
0927     if (!dev->power.is_suspended)
0928         goto Unlock;
0929 
0930     if (dev->pm_domain) {
0931         info = "power domain ";
0932         callback = pm_op(&dev->pm_domain->ops, state);
0933         goto Driver;
0934     }
0935 
0936     if (dev->type && dev->type->pm) {
0937         info = "type ";
0938         callback = pm_op(dev->type->pm, state);
0939         goto Driver;
0940     }
0941 
0942     if (dev->class && dev->class->pm) {
0943         info = "class ";
0944         callback = pm_op(dev->class->pm, state);
0945         goto Driver;
0946     }
0947 
0948     if (dev->bus) {
0949         if (dev->bus->pm) {
0950             info = "bus ";
0951             callback = pm_op(dev->bus->pm, state);
0952         } else if (dev->bus->resume) {
0953             info = "legacy bus ";
0954             callback = dev->bus->resume;
0955             goto End;
0956         }
0957     }
0958 
0959  Driver:
0960     if (!callback && dev->driver && dev->driver->pm) {
0961         info = "driver ";
0962         callback = pm_op(dev->driver->pm, state);
0963     }
0964 
0965  End:
0966     error = dpm_run_callback(callback, dev, state, info);
0967     dev->power.is_suspended = false;
0968 
0969  Unlock:
0970     device_unlock(dev);
0971     dpm_watchdog_clear(&wd);
0972 
0973  Complete:
0974     complete_all(&dev->power.completion);
0975 
0976     TRACE_RESUME(error);
0977 
0978     return error;
0979 }
0980 
0981 static void async_resume(void *data, async_cookie_t cookie)
0982 {
0983     struct device *dev = (struct device *)data;
0984     int error;
0985 
0986     error = device_resume(dev, pm_transition, true);
0987     if (error)
0988         pm_dev_err(dev, pm_transition, " async", error);
0989     put_device(dev);
0990 }
0991 
0992 /**
0993  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
0994  * @state: PM transition of the system being carried out.
0995  *
0996  * Execute the appropriate "resume" callback for all devices whose status
0997  * indicates that they are suspended.
0998  */
0999 void dpm_resume(pm_message_t state)
1000 {
1001     struct device *dev;
1002     ktime_t starttime = ktime_get();
1003 
1004     trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1005     might_sleep();
1006 
1007     mutex_lock(&dpm_list_mtx);
1008     pm_transition = state;
1009     async_error = 0;
1010 
1011     list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1012         dpm_async_fn(dev, async_resume);
1013 
1014     while (!list_empty(&dpm_suspended_list)) {
1015         dev = to_device(dpm_suspended_list.next);
1016         get_device(dev);
1017         if (!is_async(dev)) {
1018             int error;
1019 
1020             mutex_unlock(&dpm_list_mtx);
1021 
1022             error = device_resume(dev, state, false);
1023             if (error) {
1024                 suspend_stats.failed_resume++;
1025                 dpm_save_failed_step(SUSPEND_RESUME);
1026                 dpm_save_failed_dev(dev_name(dev));
1027                 pm_dev_err(dev, state, "", error);
1028             }
1029 
1030             mutex_lock(&dpm_list_mtx);
1031         }
1032         if (!list_empty(&dev->power.entry))
1033             list_move_tail(&dev->power.entry, &dpm_prepared_list);
1034 
1035         mutex_unlock(&dpm_list_mtx);
1036 
1037         put_device(dev);
1038 
1039         mutex_lock(&dpm_list_mtx);
1040     }
1041     mutex_unlock(&dpm_list_mtx);
1042     async_synchronize_full();
1043     dpm_show_time(starttime, state, 0, NULL);
1044 
1045     cpufreq_resume();
1046     devfreq_resume();
1047     trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1048 }
1049 
1050 /**
1051  * device_complete - Complete a PM transition for given device.
1052  * @dev: Device to handle.
1053  * @state: PM transition of the system being carried out.
1054  */
1055 static void device_complete(struct device *dev, pm_message_t state)
1056 {
1057     void (*callback)(struct device *) = NULL;
1058     const char *info = NULL;
1059 
1060     if (dev->power.syscore)
1061         goto out;
1062 
1063     device_lock(dev);
1064 
1065     if (dev->pm_domain) {
1066         info = "completing power domain ";
1067         callback = dev->pm_domain->ops.complete;
1068     } else if (dev->type && dev->type->pm) {
1069         info = "completing type ";
1070         callback = dev->type->pm->complete;
1071     } else if (dev->class && dev->class->pm) {
1072         info = "completing class ";
1073         callback = dev->class->pm->complete;
1074     } else if (dev->bus && dev->bus->pm) {
1075         info = "completing bus ";
1076         callback = dev->bus->pm->complete;
1077     }
1078 
1079     if (!callback && dev->driver && dev->driver->pm) {
1080         info = "completing driver ";
1081         callback = dev->driver->pm->complete;
1082     }
1083 
1084     if (callback) {
1085         pm_dev_dbg(dev, state, info);
1086         callback(dev);
1087     }
1088 
1089     device_unlock(dev);
1090 
1091 out:
1092     pm_runtime_put(dev);
1093 }
1094 
1095 /**
1096  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1097  * @state: PM transition of the system being carried out.
1098  *
1099  * Execute the ->complete() callbacks for all devices whose PM status is not
1100  * DPM_ON (this allows new devices to be registered).
1101  */
1102 void dpm_complete(pm_message_t state)
1103 {
1104     struct list_head list;
1105 
1106     trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1107     might_sleep();
1108 
1109     INIT_LIST_HEAD(&list);
1110     mutex_lock(&dpm_list_mtx);
1111     while (!list_empty(&dpm_prepared_list)) {
1112         struct device *dev = to_device(dpm_prepared_list.prev);
1113 
1114         get_device(dev);
1115         dev->power.is_prepared = false;
1116         list_move(&dev->power.entry, &list);
1117 
1118         mutex_unlock(&dpm_list_mtx);
1119 
1120         trace_device_pm_callback_start(dev, "", state.event);
1121         device_complete(dev, state);
1122         trace_device_pm_callback_end(dev, 0);
1123 
1124         put_device(dev);
1125 
1126         mutex_lock(&dpm_list_mtx);
1127     }
1128     list_splice(&list, &dpm_list);
1129     mutex_unlock(&dpm_list_mtx);
1130 
1131     /* Allow device probing and trigger re-probing of deferred devices */
1132     device_unblock_probing();
1133     trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1134 }
1135 
1136 /**
1137  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1138  * @state: PM transition of the system being carried out.
1139  *
1140  * Execute "resume" callbacks for all devices and complete the PM transition of
1141  * the system.
1142  */
1143 void dpm_resume_end(pm_message_t state)
1144 {
1145     dpm_resume(state);
1146     dpm_complete(state);
1147 }
1148 EXPORT_SYMBOL_GPL(dpm_resume_end);
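/*
 * Illustrative sketch, not part of the original file: the system sleep core
 * (e.g. kernel/power/suspend.c) drives the routines in this file roughly in
 * the following order on the way into and out of a sleep state:
 *
 *	dpm_suspend_start(state)	->prepare() and ->suspend()
 *	dpm_suspend_end(state)		->suspend_late() and ->suspend_noirq()
 *	... the platform enters the sleep state ...
 *	dpm_resume_start(state)		->resume_noirq() and ->resume_early()
 *	dpm_resume_end(state)		->resume() and ->complete()
 */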
1149 
1150 
1151 /*------------------------- Suspend routines -------------------------*/
1152 
1153 /**
1154  * resume_event - Return a "resume" message for given "suspend" sleep state.
1155  * @sleep_state: PM message representing a sleep state.
1156  *
1157  * Return a PM message representing the resume event corresponding to given
1158  * sleep state.
1159  */
1160 static pm_message_t resume_event(pm_message_t sleep_state)
1161 {
1162     switch (sleep_state.event) {
1163     case PM_EVENT_SUSPEND:
1164         return PMSG_RESUME;
1165     case PM_EVENT_FREEZE:
1166     case PM_EVENT_QUIESCE:
1167         return PMSG_RECOVER;
1168     case PM_EVENT_HIBERNATE:
1169         return PMSG_RESTORE;
1170     }
1171     return PMSG_ON;
1172 }
1173 
1174 static void dpm_superior_set_must_resume(struct device *dev)
1175 {
1176     struct device_link *link;
1177     int idx;
1178 
1179     if (dev->parent)
1180         dev->parent->power.must_resume = true;
1181 
1182     idx = device_links_read_lock();
1183 
1184     list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1185         link->supplier->power.must_resume = true;
1186 
1187     device_links_read_unlock(idx);
1188 }
1189 
1190 /**
1191  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1192  * @dev: Device to handle.
1193  * @state: PM transition of the system being carried out.
1194  * @async: If true, the device is being suspended asynchronously.
1195  *
1196  * The driver of @dev will not receive interrupts while this function is being
1197  * executed.
1198  */
1199 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1200 {
1201     pm_callback_t callback = NULL;
1202     const char *info = NULL;
1203     int error = 0;
1204 
1205     TRACE_DEVICE(dev);
1206     TRACE_SUSPEND(0);
1207 
1208     dpm_wait_for_subordinate(dev, async);
1209 
1210     if (async_error)
1211         goto Complete;
1212 
1213     if (dev->power.syscore || dev->power.direct_complete)
1214         goto Complete;
1215 
1216     if (dev->pm_domain) {
1217         info = "noirq power domain ";
1218         callback = pm_noirq_op(&dev->pm_domain->ops, state);
1219     } else if (dev->type && dev->type->pm) {
1220         info = "noirq type ";
1221         callback = pm_noirq_op(dev->type->pm, state);
1222     } else if (dev->class && dev->class->pm) {
1223         info = "noirq class ";
1224         callback = pm_noirq_op(dev->class->pm, state);
1225     } else if (dev->bus && dev->bus->pm) {
1226         info = "noirq bus ";
1227         callback = pm_noirq_op(dev->bus->pm, state);
1228     }
1229     if (callback)
1230         goto Run;
1231 
1232     if (dev_pm_skip_suspend(dev))
1233         goto Skip;
1234 
1235     if (dev->driver && dev->driver->pm) {
1236         info = "noirq driver ";
1237         callback = pm_noirq_op(dev->driver->pm, state);
1238     }
1239 
1240 Run:
1241     error = dpm_run_callback(callback, dev, state, info);
1242     if (error) {
1243         async_error = error;
1244         goto Complete;
1245     }
1246 
1247 Skip:
1248     dev->power.is_noirq_suspended = true;
1249 
1250     /*
1251      * Skipping the resume of devices that were in use right before the
1252      * system suspend (as indicated by their PM-runtime usage counters)
1253      * would be suboptimal.  Also resume them if skipping their resume is
1254      * not allowed.
1255      */
1256     if (atomic_read(&dev->power.usage_count) > 1 ||
1257         !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1258           dev->power.may_skip_resume))
1259         dev->power.must_resume = true;
1260 
1261     if (dev->power.must_resume)
1262         dpm_superior_set_must_resume(dev);
1263 
1264 Complete:
1265     complete_all(&dev->power.completion);
1266     TRACE_SUSPEND(error);
1267     return error;
1268 }
1269 
1270 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1271 {
1272     struct device *dev = (struct device *)data;
1273     int error;
1274 
1275     error = __device_suspend_noirq(dev, pm_transition, true);
1276     if (error) {
1277         dpm_save_failed_dev(dev_name(dev));
1278         pm_dev_err(dev, pm_transition, " async", error);
1279     }
1280 
1281     put_device(dev);
1282 }
1283 
1284 static int device_suspend_noirq(struct device *dev)
1285 {
1286     if (dpm_async_fn(dev, async_suspend_noirq))
1287         return 0;
1288 
1289     return __device_suspend_noirq(dev, pm_transition, false);
1290 }
1291 
1292 static int dpm_noirq_suspend_devices(pm_message_t state)
1293 {
1294     ktime_t starttime = ktime_get();
1295     int error = 0;
1296 
1297     trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1298     mutex_lock(&dpm_list_mtx);
1299     pm_transition = state;
1300     async_error = 0;
1301 
1302     while (!list_empty(&dpm_late_early_list)) {
1303         struct device *dev = to_device(dpm_late_early_list.prev);
1304 
1305         get_device(dev);
1306         mutex_unlock(&dpm_list_mtx);
1307 
1308         error = device_suspend_noirq(dev);
1309 
1310         mutex_lock(&dpm_list_mtx);
1311 
1312         if (error) {
1313             pm_dev_err(dev, state, " noirq", error);
1314             dpm_save_failed_dev(dev_name(dev));
1315         } else if (!list_empty(&dev->power.entry)) {
1316             list_move(&dev->power.entry, &dpm_noirq_list);
1317         }
1318 
1319         mutex_unlock(&dpm_list_mtx);
1320 
1321         put_device(dev);
1322 
1323         mutex_lock(&dpm_list_mtx);
1324 
1325         if (error || async_error)
1326             break;
1327     }
1328     mutex_unlock(&dpm_list_mtx);
1329     async_synchronize_full();
1330     if (!error)
1331         error = async_error;
1332 
1333     if (error) {
1334         suspend_stats.failed_suspend_noirq++;
1335         dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1336     }
1337     dpm_show_time(starttime, state, error, "noirq");
1338     trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1339     return error;
1340 }
1341 
1342 /**
1343  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1344  * @state: PM transition of the system being carried out.
1345  *
1346  * Prevent device drivers' interrupt handlers from being called and invoke
1347  * "noirq" suspend callbacks for all non-sysdev devices.
1348  */
1349 int dpm_suspend_noirq(pm_message_t state)
1350 {
1351     int ret;
1352 
1353     device_wakeup_arm_wake_irqs();
1354     suspend_device_irqs();
1355 
1356     ret = dpm_noirq_suspend_devices(state);
1357     if (ret)
1358         dpm_resume_noirq(resume_event(state));
1359 
1360     return ret;
1361 }
1362 
1363 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1364 {
1365     struct device *parent = dev->parent;
1366 
1367     if (!parent)
1368         return;
1369 
1370     spin_lock_irq(&parent->power.lock);
1371 
1372     if (device_wakeup_path(dev) && !parent->power.ignore_children)
1373         parent->power.wakeup_path = true;
1374 
1375     spin_unlock_irq(&parent->power.lock);
1376 }
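/*
 * Illustrative sketch, not part of the original file: device_wakeup_path()
 * reports whether something at or below this device may need to wake up the
 * system; a driver typically makes its device wakeup-capable (and thus a
 * candidate for the wakeup path) with:
 *
 *	device_init_wakeup(dev, true);
 */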
1377 
1378 /**
1379  * __device_suspend_late - Execute a "late suspend" callback for given device.
1380  * @dev: Device to handle.
1381  * @state: PM transition of the system being carried out.
1382  * @async: If true, the device is being suspended asynchronously.
1383  *
1384  * Runtime PM is disabled for @dev while this function is being executed.
1385  */
1386 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1387 {
1388     pm_callback_t callback = NULL;
1389     const char *info = NULL;
1390     int error = 0;
1391 
1392     TRACE_DEVICE(dev);
1393     TRACE_SUSPEND(0);
1394 
1395     __pm_runtime_disable(dev, false);
1396 
1397     dpm_wait_for_subordinate(dev, async);
1398 
1399     if (async_error)
1400         goto Complete;
1401 
1402     if (pm_wakeup_pending()) {
1403         async_error = -EBUSY;
1404         goto Complete;
1405     }
1406 
1407     if (dev->power.syscore || dev->power.direct_complete)
1408         goto Complete;
1409 
1410     if (dev->pm_domain) {
1411         info = "late power domain ";
1412         callback = pm_late_early_op(&dev->pm_domain->ops, state);
1413     } else if (dev->type && dev->type->pm) {
1414         info = "late type ";
1415         callback = pm_late_early_op(dev->type->pm, state);
1416     } else if (dev->class && dev->class->pm) {
1417         info = "late class ";
1418         callback = pm_late_early_op(dev->class->pm, state);
1419     } else if (dev->bus && dev->bus->pm) {
1420         info = "late bus ";
1421         callback = pm_late_early_op(dev->bus->pm, state);
1422     }
1423     if (callback)
1424         goto Run;
1425 
1426     if (dev_pm_skip_suspend(dev))
1427         goto Skip;
1428 
1429     if (dev->driver && dev->driver->pm) {
1430         info = "late driver ";
1431         callback = pm_late_early_op(dev->driver->pm, state);
1432     }
1433 
1434 Run:
1435     error = dpm_run_callback(callback, dev, state, info);
1436     if (error) {
1437         async_error = error;
1438         goto Complete;
1439     }
1440     dpm_propagate_wakeup_to_parent(dev);
1441 
1442 Skip:
1443     dev->power.is_late_suspended = true;
1444 
1445 Complete:
1446     TRACE_SUSPEND(error);
1447     complete_all(&dev->power.completion);
1448     return error;
1449 }
1450 
1451 static void async_suspend_late(void *data, async_cookie_t cookie)
1452 {
1453     struct device *dev = (struct device *)data;
1454     int error;
1455 
1456     error = __device_suspend_late(dev, pm_transition, true);
1457     if (error) {
1458         dpm_save_failed_dev(dev_name(dev));
1459         pm_dev_err(dev, pm_transition, " async", error);
1460     }
1461     put_device(dev);
1462 }
1463 
1464 static int device_suspend_late(struct device *dev)
1465 {
1466     if (dpm_async_fn(dev, async_suspend_late))
1467         return 0;
1468 
1469     return __device_suspend_late(dev, pm_transition, false);
1470 }
1471 
1472 /**
1473  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1474  * @state: PM transition of the system being carried out.
1475  */
1476 int dpm_suspend_late(pm_message_t state)
1477 {
1478     ktime_t starttime = ktime_get();
1479     int error = 0;
1480 
1481     trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1482     wake_up_all_idle_cpus();
1483     mutex_lock(&dpm_list_mtx);
1484     pm_transition = state;
1485     async_error = 0;
1486 
1487     while (!list_empty(&dpm_suspended_list)) {
1488         struct device *dev = to_device(dpm_suspended_list.prev);
1489 
1490         get_device(dev);
1491 
1492         mutex_unlock(&dpm_list_mtx);
1493 
1494         error = device_suspend_late(dev);
1495 
1496         mutex_lock(&dpm_list_mtx);
1497 
1498         if (!list_empty(&dev->power.entry))
1499             list_move(&dev->power.entry, &dpm_late_early_list);
1500 
1501         if (error) {
1502             pm_dev_err(dev, state, " late", error);
1503             dpm_save_failed_dev(dev_name(dev));
1504         }
1505 
1506         mutex_unlock(&dpm_list_mtx);
1507 
1508         put_device(dev);
1509 
1510         mutex_lock(&dpm_list_mtx);
1511 
1512         if (error || async_error)
1513             break;
1514     }
1515     mutex_unlock(&dpm_list_mtx);
1516     async_synchronize_full();
1517     if (!error)
1518         error = async_error;
1519     if (error) {
1520         suspend_stats.failed_suspend_late++;
1521         dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1522         dpm_resume_early(resume_event(state));
1523     }
1524     dpm_show_time(starttime, state, error, "late");
1525     trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1526     return error;
1527 }
1528 
1529 /**
1530  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1531  * @state: PM transition of the system being carried out.
1532  */
1533 int dpm_suspend_end(pm_message_t state)
1534 {
1535     ktime_t starttime = ktime_get();
1536     int error;
1537 
1538     error = dpm_suspend_late(state);
1539     if (error)
1540         goto out;
1541 
1542     error = dpm_suspend_noirq(state);
1543     if (error)
1544         dpm_resume_early(resume_event(state));
1545 
1546 out:
1547     dpm_show_time(starttime, state, error, "end");
1548     return error;
1549 }
1550 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1551 
1552 /**
1553  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1554  * @dev: Device to suspend.
1555  * @state: PM transition of the system being carried out.
1556  * @cb: Suspend callback to execute.
1557  * @info: string description of caller.
1558  */
1559 static int legacy_suspend(struct device *dev, pm_message_t state,
1560               int (*cb)(struct device *dev, pm_message_t state),
1561               const char *info)
1562 {
1563     int error;
1564     ktime_t calltime;
1565 
1566     calltime = initcall_debug_start(dev, cb);
1567 
1568     trace_device_pm_callback_start(dev, info, state.event);
1569     error = cb(dev, state);
1570     trace_device_pm_callback_end(dev, error);
1571     suspend_report_result(dev, cb, error);
1572 
1573     initcall_debug_report(dev, calltime, cb, error);
1574 
1575     return error;
1576 }
1577 
1578 static void dpm_clear_superiors_direct_complete(struct device *dev)
1579 {
1580     struct device_link *link;
1581     int idx;
1582 
1583     if (dev->parent) {
1584         spin_lock_irq(&dev->parent->power.lock);
1585         dev->parent->power.direct_complete = false;
1586         spin_unlock_irq(&dev->parent->power.lock);
1587     }
1588 
1589     idx = device_links_read_lock();
1590 
1591     list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1592         spin_lock_irq(&link->supplier->power.lock);
1593         link->supplier->power.direct_complete = false;
1594         spin_unlock_irq(&link->supplier->power.lock);
1595     }
1596 
1597     device_links_read_unlock(idx);
1598 }
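/*
 * Illustrative sketch, not part of the original file: power.direct_complete
 * gets set during the prepare phase when a ->prepare() callback returns a
 * positive value for a device that is runtime-suspended, allowing the core
 * to leave it suspended across the whole transition; a hypothetical driver
 * could opt in with:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 */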
1599 
1600 /**
1601  * __device_suspend - Execute "suspend" callbacks for given device.
1602  * @dev: Device to handle.
1603  * @state: PM transition of the system being carried out.
1604  * @async: If true, the device is being suspended asynchronously.
1605  */
1606 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1607 {
1608     pm_callback_t callback = NULL;
1609     const char *info = NULL;
1610     int error = 0;
1611     DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1612 
1613     TRACE_DEVICE(dev);
1614     TRACE_SUSPEND(0);
1615 
1616     dpm_wait_for_subordinate(dev, async);
1617 
1618     if (async_error) {
1619         dev->power.direct_complete = false;
1620         goto Complete;
1621     }
1622 
1623     /*
1624      * Wait for possible runtime PM transitions of the device in progress
1625      * to complete and if there's a runtime resume request pending for it,
1626      * resume it before proceeding with invoking the system-wide suspend
1627      * callbacks for it.
1628      *
1629      * If the system-wide suspend callbacks below change the configuration
1630      * of the device, they must disable runtime PM for it or otherwise
1631      * ensure that its runtime-resume callbacks will not be confused by that
1632      * change in case they are invoked going forward.
1633      */
1634     pm_runtime_barrier(dev);
1635 
1636     if (pm_wakeup_pending()) {
1637         dev->power.direct_complete = false;
1638         async_error = -EBUSY;
1639         goto Complete;
1640     }
1641 
1642     if (dev->power.syscore)
1643         goto Complete;
1644 
1645     /* Avoid direct_complete to let wakeup_path propagate. */
1646     if (device_may_wakeup(dev) || device_wakeup_path(dev))
1647         dev->power.direct_complete = false;
1648 
1649     if (dev->power.direct_complete) {
1650         if (pm_runtime_status_suspended(dev)) {
1651             pm_runtime_disable(dev);
1652             if (pm_runtime_status_suspended(dev)) {
1653                 pm_dev_dbg(dev, state, "direct-complete ");
1654                 goto Complete;
1655             }
1656 
1657             pm_runtime_enable(dev);
1658         }
1659         dev->power.direct_complete = false;
1660     }
1661 
1662     dev->power.may_skip_resume = true;
1663     dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1664 
1665     dpm_watchdog_set(&wd, dev);
1666     device_lock(dev);
1667 
1668     if (dev->pm_domain) {
1669         info = "power domain ";
1670         callback = pm_op(&dev->pm_domain->ops, state);
1671         goto Run;
1672     }
1673 
1674     if (dev->type && dev->type->pm) {
1675         info = "type ";
1676         callback = pm_op(dev->type->pm, state);
1677         goto Run;
1678     }
1679 
1680     if (dev->class && dev->class->pm) {
1681         info = "class ";
1682         callback = pm_op(dev->class->pm, state);
1683         goto Run;
1684     }
1685 
1686     if (dev->bus) {
1687         if (dev->bus->pm) {
1688             info = "bus ";
1689             callback = pm_op(dev->bus->pm, state);
1690         } else if (dev->bus->suspend) {
1691             pm_dev_dbg(dev, state, "legacy bus ");
1692             error = legacy_suspend(dev, state, dev->bus->suspend,
1693                         "legacy bus ");
1694             goto End;
1695         }
1696     }
1697 
1698  Run:
1699     if (!callback && dev->driver && dev->driver->pm) {
1700         info = "driver ";
1701         callback = pm_op(dev->driver->pm, state);
1702     }
1703 
1704     error = dpm_run_callback(callback, dev, state, info);
1705 
1706  End:
1707     if (!error) {
1708         dev->power.is_suspended = true;
1709         if (device_may_wakeup(dev))
1710             dev->power.wakeup_path = true;
1711 
1712         dpm_propagate_wakeup_to_parent(dev);
1713         dpm_clear_superiors_direct_complete(dev);
1714     }
1715 
1716     device_unlock(dev);
1717     dpm_watchdog_clear(&wd);
1718 
1719  Complete:
1720     if (error)
1721         async_error = error;
1722 
1723     complete_all(&dev->power.completion);
1724     TRACE_SUSPEND(error);
1725     return error;
1726 }
1727 
1728 static void async_suspend(void *data, async_cookie_t cookie)
1729 {
1730     struct device *dev = (struct device *)data;
1731     int error;
1732 
1733     error = __device_suspend(dev, pm_transition, true);
1734     if (error) {
1735         dpm_save_failed_dev(dev_name(dev));
1736         pm_dev_err(dev, pm_transition, " async", error);
1737     }
1738 
1739     put_device(dev);
1740 }
1741 
1742 static int device_suspend(struct device *dev)
1743 {
1744     if (dpm_async_fn(dev, async_suspend))
1745         return 0;
1746 
1747     return __device_suspend(dev, pm_transition, false);
1748 }
1749 
1750 /**
1751  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1752  * @state: PM transition of the system being carried out.
1753  */
1754 int dpm_suspend(pm_message_t state)
1755 {
1756     ktime_t starttime = ktime_get();
1757     int error = 0;
1758 
1759     trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1760     might_sleep();
1761 
1762     devfreq_suspend();
1763     cpufreq_suspend();
1764 
1765     mutex_lock(&dpm_list_mtx);
1766     pm_transition = state;
1767     async_error = 0;
1768     while (!list_empty(&dpm_prepared_list)) {
1769         struct device *dev = to_device(dpm_prepared_list.prev);
1770 
1771         get_device(dev);
1772 
1773         mutex_unlock(&dpm_list_mtx);
1774 
1775         error = device_suspend(dev);
1776 
1777         mutex_lock(&dpm_list_mtx);
1778 
1779         if (error) {
1780             pm_dev_err(dev, state, "", error);
1781             dpm_save_failed_dev(dev_name(dev));
1782         } else if (!list_empty(&dev->power.entry)) {
1783             list_move(&dev->power.entry, &dpm_suspended_list);
1784         }
1785 
1786         mutex_unlock(&dpm_list_mtx);
1787 
1788         put_device(dev);
1789 
1790         mutex_lock(&dpm_list_mtx);
1791 
1792         if (error || async_error)
1793             break;
1794     }
1795     mutex_unlock(&dpm_list_mtx);
1796     async_synchronize_full();
1797     if (!error)
1798         error = async_error;
1799     if (error) {
1800         suspend_stats.failed_suspend++;
1801         dpm_save_failed_step(SUSPEND_SUSPEND);
1802     }
1803     dpm_show_time(starttime, state, error, NULL);
1804     trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1805     return error;
1806 }
1807 
1808 /**
1809  * device_prepare - Prepare a device for system power transition.
1810  * @dev: Device to handle.
1811  * @state: PM transition of the system being carried out.
1812  *
1813  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1814  * device may be registered after this function has returned.
1815  */
1816 static int device_prepare(struct device *dev, pm_message_t state)
1817 {
1818     int (*callback)(struct device *) = NULL;
1819     int ret = 0;
1820 
1821     /*
1822      * If a device's parent goes into runtime suspend at the wrong time,
1823      * it won't be possible to resume the device.  To prevent this we
1824      * block runtime suspend here, during the prepare phase, and allow
1825      * it again during the complete phase.
1826      */
1827     pm_runtime_get_noresume(dev);
1828 
1829     if (dev->power.syscore)
1830         return 0;
1831 
1832     device_lock(dev);
1833 
1834     dev->power.wakeup_path = false;
1835 
1836     if (dev->power.no_pm_callbacks)
1837         goto unlock;
1838 
1839     if (dev->pm_domain)
1840         callback = dev->pm_domain->ops.prepare;
1841     else if (dev->type && dev->type->pm)
1842         callback = dev->type->pm->prepare;
1843     else if (dev->class && dev->class->pm)
1844         callback = dev->class->pm->prepare;
1845     else if (dev->bus && dev->bus->pm)
1846         callback = dev->bus->pm->prepare;
1847 
1848     if (!callback && dev->driver && dev->driver->pm)
1849         callback = dev->driver->pm->prepare;
1850 
1851     if (callback)
1852         ret = callback(dev);
1853 
1854 unlock:
1855     device_unlock(dev);
1856 
1857     if (ret < 0) {
1858         suspend_report_result(dev, callback, ret);
1859         pm_runtime_put(dev);
1860         return ret;
1861     }
1862     /*
1863      * A positive return value from ->prepare() means "this device appears
1864      * to be runtime-suspended and its state is fine, so if it really is
1865      * runtime-suspended, you can leave it in that state provided that you
1866      * will do the same thing with all of its descendants".  This only
1867      * applies to suspend transitions, however.
1868      */
1869     spin_lock_irq(&dev->power.lock);
1870     dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1871         (ret > 0 || dev->power.no_pm_callbacks) &&
1872         !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1873     spin_unlock_irq(&dev->power.lock);
1874     return 0;
1875 }
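/*
 * Illustrative sketch (hypothetical, not from this file): a driver's
 * ->prepare() callback can return a positive value to request the
 * direct-complete optimization set up above. "foo_prepare" is made up.
 */
static int foo_prepare(struct device *dev)
{
    /*
     * If the device is already runtime-suspended and that state is also
     * adequate for system sleep, let the core leave it suspended and skip
     * the remaining suspend/resume callbacks for it.
     */
    return pm_runtime_suspended(dev);
}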
1876 
1877 /**
1878  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1879  * @state: PM transition of the system being carried out.
1880  *
1881  * Execute the ->prepare() callback(s) for all devices.
1882  */
1883 int dpm_prepare(pm_message_t state)
1884 {
1885     int error = 0;
1886 
1887     trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1888     might_sleep();
1889 
1890     /*
1891      * Give the known devices a chance to complete their probes before
1892      * probing is disabled below. This sync point matters at least at boot
1893      * time and during hibernation restore.
1894      */
1895     wait_for_device_probe();
1896     /*
1897      * Probing devices during suspend or hibernation is unsafe and would
1898      * make system behavior unpredictable, so prohibit device probing here
1899      * and defer any probes instead. Normal behavior is restored in
1900      * dpm_complete().
1901      */
1902     device_block_probing();
1903 
1904     mutex_lock(&dpm_list_mtx);
1905     while (!list_empty(&dpm_list) && !error) {
1906         struct device *dev = to_device(dpm_list.next);
1907 
1908         get_device(dev);
1909 
1910         mutex_unlock(&dpm_list_mtx);
1911 
1912         trace_device_pm_callback_start(dev, "", state.event);
1913         error = device_prepare(dev, state);
1914         trace_device_pm_callback_end(dev, error);
1915 
1916         mutex_lock(&dpm_list_mtx);
1917 
1918         if (!error) {
1919             dev->power.is_prepared = true;
1920             if (!list_empty(&dev->power.entry))
1921                 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1922         } else if (error == -EAGAIN) {
1923             error = 0;
1924         } else {
1925             dev_info(dev, "not prepared for power transition: code %d\n",
1926                  error);
1927         }
1928 
1929         mutex_unlock(&dpm_list_mtx);
1930 
1931         put_device(dev);
1932 
1933         mutex_lock(&dpm_list_mtx);
1934     }
1935     mutex_unlock(&dpm_list_mtx);
1936     trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1937     return error;
1938 }
1939 
1940 /**
1941  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1942  * @state: PM transition of the system being carried out.
1943  *
1944  * Prepare all non-sysdev devices for a system PM transition and execute "suspend"
1945  * callbacks for them.
1946  */
1947 int dpm_suspend_start(pm_message_t state)
1948 {
1949     ktime_t starttime = ktime_get();
1950     int error;
1951 
1952     error = dpm_prepare(state);
1953     if (error) {
1954         suspend_stats.failed_prepare++;
1955         dpm_save_failed_step(SUSPEND_PREPARE);
1956     } else
1957         error = dpm_suspend(state);
1958     dpm_show_time(starttime, state, error, "start");
1959     return error;
1960 }
1961 EXPORT_SYMBOL_GPL(dpm_suspend_start);
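/*
 * Illustrative sketch (hypothetical, not from this file): a caller such as
 * the system sleep core pairs dpm_suspend_start() with dpm_resume_end() when
 * the device phase fails, roughly like this ("foo_enter_sleep" is made up):
 */
static int foo_enter_sleep(void)
{
    int error = dpm_suspend_start(PMSG_SUSPEND);

    if (error) {
        /* Unwind: resume whatever was prepared or suspended so far. */
        dpm_resume_end(PMSG_RESUME);
        return error;
    }

    /* ... dpm_suspend_end(), platform sleep, then dpm_resume_end() ... */
    return 0;
}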
1962 
1963 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1964 {
1965     if (ret)
1966         dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1967 }
1968 EXPORT_SYMBOL_GPL(__suspend_report_result);
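/*
 * Note (not from this file): callers normally go through the
 * suspend_report_result() wrapper, as device_prepare() does above, which
 * <linux/pm.h> defines roughly along these lines, passing __func__
 * automatically:
 *
 *    #define suspend_report_result(dev, fn, ret) \
 *        __suspend_report_result(__func__, dev, (void *)(fn), ret)
 */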
1969 
1970 /**
1971  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1972  * @subordinate: Device that needs to wait for @dev.
1973  * @dev: Device to wait for.
1974  */
1975 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1976 {
1977     dpm_wait(dev, subordinate->power.async_suspend);
1978     return async_error;
1979 }
1980 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
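/*
 * Illustrative sketch (hypothetical, not from this file): a driver whose
 * device depends on another device outside the parent/child hierarchy can
 * call device_pm_wait_for_dev() from its own callbacks when async
 * suspend/resume is in use. "bar_*" names and the partner pointer are made up.
 */
static struct device *bar_partner;    /* hypothetical dependency */

static int bar_resume(struct device *dev)
{
    int error;

    /* Make sure the partner device has finished its own transition first. */
    error = device_pm_wait_for_dev(dev, bar_partner);
    if (error)
        return error;

    /* ... now safe to bring the hypothetical device back up ... */
    return 0;
}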
1981 
1982 /**
1983  * dpm_for_each_dev - device iterator.
1984  * @data: data for the callback.
1985  * @fn: function to be called for each device.
1986  *
1987  * Iterate over devices in dpm_list, and call @fn for each device,
1988  * passing it @data.
1989  */
1990 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1991 {
1992     struct device *dev;
1993 
1994     if (!fn)
1995         return;
1996 
1997     device_pm_lock();
1998     list_for_each_entry(dev, &dpm_list, power.entry)
1999         fn(dev, data);
2000     device_pm_unlock();
2001 }
2002 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
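/*
 * Illustrative sketch (hypothetical, not from this file): using the iterator
 * above to count the devices currently on dpm_list. The callback and counter
 * are made up.
 */
static void count_one_dev(struct device *dev, void *data)
{
    (*(unsigned int *)data)++;    /* runs with dpm_list_mtx held */
}

static unsigned int count_pm_devices(void)
{
    unsigned int count = 0;

    dpm_for_each_dev(&count, count_one_dev);
    return count;
}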
2003 
2004 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2005 {
2006     if (!ops)
2007         return true;
2008 
2009     return !ops->prepare &&
2010            !ops->suspend &&
2011            !ops->suspend_late &&
2012            !ops->suspend_noirq &&
2013            !ops->resume_noirq &&
2014            !ops->resume_early &&
2015            !ops->resume &&
2016            !ops->complete;
2017 }
2018 
2019 void device_pm_check_callbacks(struct device *dev)
2020 {
2021     unsigned long flags;
2022 
2023     spin_lock_irqsave(&dev->power.lock, flags);
2024     dev->power.no_pm_callbacks =
2025         (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2026          !dev->bus->suspend && !dev->bus->resume)) &&
2027         (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2028         (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2029         (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2030         (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2031          !dev->driver->suspend && !dev->driver->resume));
2032     spin_unlock_irqrestore(&dev->power.lock, flags);
2033 }
2034 
2035 bool dev_pm_skip_suspend(struct device *dev)
2036 {
2037     return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2038         pm_runtime_status_suspended(dev);
2039 }
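/*
 * Illustrative sketch (hypothetical, not from this file): a driver opts into
 * the check above by setting DPM_FLAG_SMART_SUSPEND, typically at probe time;
 * its own late/noirq callbacks (or middle-layer code) can then skip work for
 * devices that are already runtime-suspended. "baz_*" names are made up.
 */
static int baz_probe(struct device *dev)
{
    /* Tell the PM core this driver copes with being left runtime-suspended. */
    dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
    return 0;
}

static int baz_suspend_late(struct device *dev)
{
    if (dev_pm_skip_suspend(dev))
        return 0;    /* already runtime-suspended, nothing more to do */

    /* ... put the hypothetical device into a low-power state ... */
    return 0;
}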