Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Functions related to generic timeout handling of requests.
0004  */
#include <linux/blkdev.h>
#include <linux/fault-inject.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>

#include "blk.h"
#include "blk-mq.h"
0012 
#ifdef CONFIG_FAIL_IO_TIMEOUT

/*
 * Fault attribute controlling fake I/O timeouts: when it fires, the
 * completion of a request is treated as if the request had timed out
 * (see __blk_should_fake_timeout() below).
 */
static DECLARE_FAULT_ATTR(fail_io_timeout);

/*
 * Parse the "fail_io_timeout=" kernel command-line option into the
 * fault attribute via the generic fault-injection parser.
 */
static int __init setup_fail_io_timeout(char *str)
{
    return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
0022 
/*
 * Roll the fault-injection dice for one request: returns true when this
 * request's completion should be suppressed so that it appears to have
 * timed out.  The @q argument is currently unused; the decision is made
 * purely from the global fail_io_timeout attribute.
 */
bool __blk_should_fake_timeout(struct request_queue *q)
{
    return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
0028 
0029 static int __init fail_io_timeout_debugfs(void)
0030 {
0031     struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
0032                         NULL, &fail_io_timeout);
0033 
0034     return PTR_ERR_OR_ZERO(dir);
0035 }
0036 
0037 late_initcall(fail_io_timeout_debugfs);
0038 
0039 ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
0040               char *buf)
0041 {
0042     struct gendisk *disk = dev_to_disk(dev);
0043     int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
0044 
0045     return sprintf(buf, "%d\n", set != 0);
0046 }
0047 
0048 ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
0049                const char *buf, size_t count)
0050 {
0051     struct gendisk *disk = dev_to_disk(dev);
0052     int val;
0053 
0054     if (count) {
0055         struct request_queue *q = disk->queue;
0056         char *p = (char *) buf;
0057 
0058         val = simple_strtoul(p, &p, 10);
0059         if (val)
0060             blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
0061         else
0062             blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
0063     }
0064 
0065     return count;
0066 }
0067 
0068 #endif /* CONFIG_FAIL_IO_TIMEOUT */
0069 
/**
 * blk_abort_request - Request recovery for the specified command
 * @req:    pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs who implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request.
 */
void blk_abort_request(struct request *req)
{
    /*
     * All we need to ensure is that timeout scan takes place
     * immediately and that scan sees the new timeout value.
     * No need for fancy synchronizations.
     */
    /* Make the request's deadline "now" so the next scan expires it. */
    WRITE_ONCE(req->deadline, jiffies);
    /* Kick the queue's timeout work immediately instead of waiting. */
    kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
0090 
/*
 * Mask used by blk_round_jiffies(): roundup_pow_of_two(HZ) - 1, i.e. a
 * power-of-two minus one that covers roughly one second's worth of
 * jiffies.  Computed once at boot (HZ never changes at runtime) and
 * then only read, hence __read_mostly.
 */
static unsigned long blk_timeout_mask __read_mostly;

static int __init blk_timeout_init(void)
{
    blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
    return 0;
}

late_initcall(blk_timeout_init);
0100 
/*
 * Just a rough estimate, we don't care about specific values for timeouts.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
    /*
     * Pads @j by the full mask (~1s worth of jiffies, see
     * blk_timeout_init()) plus one, rather than rounding to a boundary.
     * NOTE(review): rounding up to the next power-of-two boundary would
     * be (j | blk_timeout_mask) + 1; the '+' form looks like deliberate
     * cheap slack per the comment above — confirm against upstream
     * before "fixing".
     */
    return (j + blk_timeout_mask) + 1;
}
0108 
0109 unsigned long blk_rq_timeout(unsigned long timeout)
0110 {
0111     unsigned long maxt;
0112 
0113     maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
0114     if (time_after(timeout, maxt))
0115         timeout = maxt;
0116 
0117     return timeout;
0118 }
0119 
/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:    request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 */
void blk_add_timer(struct request *req)
{
    struct request_queue *q = req->q;
    unsigned long expiry;

    /*
     * Some LLDs, like scsi, peek at the timeout to prevent a
     * command from being retried forever.
     */
    if (!req->timeout)
        req->timeout = q->rq_timeout;  /* fall back to the queue default */

    /* Fresh timing run: clear any stale timed-out marker. */
    req->rq_flags &= ~RQF_TIMED_OUT;

    /* Per-request deadline, read locklessly by the timeout scan. */
    expiry = jiffies + req->timeout;
    WRITE_ONCE(req->deadline, expiry);

    /*
     * If the timer isn't already pending or this timeout is earlier
     * than an existing one, modify the timer. Round up to next nearest
     * second.
     */
    expiry = blk_rq_timeout(blk_round_jiffies(expiry));

    if (!timer_pending(&q->timeout) ||
        time_before(expiry, q->timeout.expires)) {
        /*
         * If the timer is not pending, q->timeout.expires is stale and
         * diff is meaningless — but then the timer_pending() check
         * below fires first and diff is never consulted.
         */
        unsigned long diff = q->timeout.expires - expiry;

        /*
         * Due to added timer slack to group timers, the timer
         * will often be a little in front of what we asked for.
         * So apply some tolerance here too, otherwise we keep
         * modifying the timer because expires for value X
         * will be X + something.
         */
        if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
            mod_timer(&q->timeout, expiry);
    }

}