/* Extracted from an LXR cross-reference view of block/blk-pm.c. */

0001 // SPDX-License-Identifier: GPL-2.0
0002 
0003 #include <linux/blk-mq.h>
0004 #include <linux/blk-pm.h>
0005 #include <linux/blkdev.h>
0006 #include <linux/pm_runtime.h>
0007 #include "blk-mq.h"
0008 #include "blk-mq-tag.h"
0009 
0010 /**
0011  * blk_pm_runtime_init - Block layer runtime PM initialization routine
0012  * @q: the queue of the device
0013  * @dev: the device the queue belongs to
0014  *
0015  * Description:
0016  *    Initialize runtime-PM-related fields for @q and start auto suspend for
0017  *    @dev. Drivers that want to take advantage of request-based runtime PM
0018  *    should call this function after @dev has been initialized, and its
0019  *    request queue @q has been allocated, and runtime PM for it can not happen
0020  *    yet(either due to disabled/forbidden or its usage_count > 0). In most
0021  *    cases, driver should call this function before any I/O has taken place.
0022  *
0023  *    This function takes care of setting up using auto suspend for the device,
0024  *    the autosuspend delay is set to -1 to make runtime suspend impossible
0025  *    until an updated value is either set by user or by driver. Drivers do
0026  *    not need to touch other autosuspend settings.
0027  *
0028  *    The block layer runtime PM is request based, so only works for drivers
0029  *    that use request as their IO unit instead of those directly use bio's.
0030  */
0031 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
0032 {
0033     q->dev = dev;
0034     q->rpm_status = RPM_ACTIVE;
0035     pm_runtime_set_autosuspend_delay(q->dev, -1);
0036     pm_runtime_use_autosuspend(q->dev);
0037 }
0038 EXPORT_SYMBOL(blk_pm_runtime_init);
0039 
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0     - OK to runtime suspend the device
 *    -EBUSY    - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	/* Queues without runtime PM set up never block suspension. */
	if (!q->dev)
		return ret;

	/* Only a fully active queue may legitimately enter this path. */
	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	/* rpm_status is protected by queue_lock; publish SUSPENDING first. */
	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress to avoid that any
	 * new non-PM blk_queue_enter() calls succeed before the pm_only
	 * counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that
	 * involves calling call_rcu(), it is guaranteed that later
	 * blk_queue_enter() calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	/* A zero usage counter means no requests are in flight: suspend OK. */
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		/*
		 * Suspend is not possible: roll the status back to ACTIVE and
		 * mark last busy so the PM core retries autosuspend later.
		 */
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);
0108 
0109 /**
0110  * blk_post_runtime_suspend - Post runtime suspend processing
0111  * @q: the queue of the device
0112  * @err: return value of the device's runtime_suspend function
0113  *
0114  * Description:
0115  *    Update the queue's runtime status according to the return value of the
0116  *    device's runtime suspend function and mark last busy for the device so
0117  *    that PM core will try to auto suspend the device at a later time.
0118  *
0119  *    This function should be called near the end of the device's
0120  *    runtime_suspend callback.
0121  */
0122 void blk_post_runtime_suspend(struct request_queue *q, int err)
0123 {
0124     if (!q->dev)
0125         return;
0126 
0127     spin_lock_irq(&q->queue_lock);
0128     if (!err) {
0129         q->rpm_status = RPM_SUSPENDED;
0130     } else {
0131         q->rpm_status = RPM_ACTIVE;
0132         pm_runtime_mark_last_busy(q->dev);
0133     }
0134     spin_unlock_irq(&q->queue_lock);
0135 
0136     if (err)
0137         blk_clear_pm_only(q);
0138 }
0139 EXPORT_SYMBOL(blk_post_runtime_suspend);
0140 
0141 /**
0142  * blk_pre_runtime_resume - Pre runtime resume processing
0143  * @q: the queue of the device
0144  *
0145  * Description:
0146  *    Update the queue's runtime status to RESUMING in preparation for the
0147  *    runtime resume of the device.
0148  *
0149  *    This function should be called near the start of the device's
0150  *    runtime_resume callback.
0151  */
0152 void blk_pre_runtime_resume(struct request_queue *q)
0153 {
0154     if (!q->dev)
0155         return;
0156 
0157     spin_lock_irq(&q->queue_lock);
0158     q->rpm_status = RPM_RESUMING;
0159     spin_unlock_irq(&q->queue_lock);
0160 }
0161 EXPORT_SYMBOL(blk_pre_runtime_resume);
0162 
/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    For historical reasons, this routine merely calls blk_set_runtime_active()
 *    to do the real work of restarting the queue.  It does this regardless of
 *    whether the device's runtime-resume succeeded; even if it failed the
 *    driver or error handler will need to communicate with the device.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q)
{
	/* Delegates entirely; see blk_set_runtime_active() below. */
	blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
0181 
0182 /**
0183  * blk_set_runtime_active - Force runtime status of the queue to be active
0184  * @q: the queue of the device
0185  *
0186  * If the device is left runtime suspended during system suspend the resume
0187  * hook typically resumes the device and corrects runtime status
0188  * accordingly. However, that does not affect the queue runtime PM status
0189  * which is still "suspended". This prevents processing requests from the
0190  * queue.
0191  *
0192  * This function can be used in driver's resume hook to correct queue
0193  * runtime PM status and re-enable peeking requests from the queue. It
0194  * should be called before first request is added to the queue.
0195  *
0196  * This function is also called by blk_post_runtime_resume() for
0197  * runtime resumes.  It does everything necessary to restart the queue.
0198  */
0199 void blk_set_runtime_active(struct request_queue *q)
0200 {
0201     int old_status;
0202 
0203     if (!q->dev)
0204         return;
0205 
0206     spin_lock_irq(&q->queue_lock);
0207     old_status = q->rpm_status;
0208     q->rpm_status = RPM_ACTIVE;
0209     pm_runtime_mark_last_busy(q->dev);
0210     pm_request_autosuspend(q->dev);
0211     spin_unlock_irq(&q->queue_lock);
0212 
0213     if (old_status != RPM_ACTIVE)
0214         blk_clear_pm_only(q);
0215 }
0216 EXPORT_SYMBOL(blk_set_runtime_active);