0001
0002
0003
0004
0005 #include <linux/kernel.h>
0006 #include <linux/module.h>
0007 #include <linux/blkdev.h>
0008 #include <linux/fault-inject.h>
0009
0010 #include "blk.h"
0011 #include "blk-mq.h"
0012
0013 #ifdef CONFIG_FAIL_IO_TIMEOUT
0014
0015 static DECLARE_FAULT_ATTR(fail_io_timeout);
0016
/*
 * Parse the "fail_io_timeout=" kernel boot parameter into the global
 * fault_attr. Format follows the generic fault-injection syntax:
 * <interval>,<probability>,<space>,<times>.
 */
static int __init setup_fail_io_timeout(char *str)
{
	return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);
0022
/*
 * Decide whether to inject a fake timeout for one request.
 *
 * The decision comes solely from the global fail_io_timeout fault_attr
 * (the @q argument is not consulted here). Each call accounts for a
 * "size" of 1 against the configured probability/interval.
 * NOTE(review): presumably only reached when QUEUE_FLAG_FAIL_IO is set
 * on the queue — confirm against the callers in blk.h/blk-mq.
 */
bool __blk_should_fake_timeout(struct request_queue *q)
{
	return should_fail(&fail_io_timeout, 1);
}
EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
0028
0029 static int __init fail_io_timeout_debugfs(void)
0030 {
0031 struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
0032 NULL, &fail_io_timeout);
0033
0034 return PTR_ERR_OR_ZERO(dir);
0035 }
0036
0037 late_initcall(fail_io_timeout_debugfs);
0038
0039 ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
0040 char *buf)
0041 {
0042 struct gendisk *disk = dev_to_disk(dev);
0043 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
0044
0045 return sprintf(buf, "%d\n", set != 0);
0046 }
0047
0048 ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
0049 const char *buf, size_t count)
0050 {
0051 struct gendisk *disk = dev_to_disk(dev);
0052 int val;
0053
0054 if (count) {
0055 struct request_queue *q = disk->queue;
0056 char *p = (char *) buf;
0057
0058 val = simple_strtoul(p, &p, 10);
0059 if (val)
0060 blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
0061 else
0062 blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
0063 }
0064
0065 return count;
0066 }
0067
0068 #endif
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
/**
 * blk_abort_request - request immediate timeout handling for a command
 * @req:	request to abort
 *
 * Forces the request's deadline to "now" and kicks the queue's timeout
 * work so the timeout scan runs right away and observes the new
 * deadline. Drivers that implement their own error recovery may use
 * this to push a stuck command into the timeout path.
 */
void blk_abort_request(struct request *req)
{
	/*
	 * All we need to ensure is that the timeout scan takes place
	 * immediately and that the scan sees the new deadline value.
	 * WRITE_ONCE suffices; no heavier synchronization is needed.
	 */
	WRITE_ONCE(req->deadline, jiffies);
	kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
0090
/*
 * Mask of roughly one second's worth of jiffies: HZ rounded up to a
 * power of two, minus one. Computed once at boot by blk_timeout_init()
 * and used by blk_round_jiffies() below.
 */
static unsigned long blk_timeout_mask __read_mostly;

static int __init blk_timeout_init(void)
{
	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
	return 0;
}

late_initcall(blk_timeout_init);
0100
0101
0102
0103
/*
 * Relaxed "rounding" of a jiffies deadline: pushes @j forward by
 * blk_timeout_mask + 1 ticks (about one second). This is deliberately
 * just a rough estimate — request timeouts don't care about exact
 * expiry values, so no masking/alignment is done. Do not "fix" this
 * into a true round-up.
 */
static inline unsigned long blk_round_jiffies(unsigned long j)
{
	return (j + blk_timeout_mask) + 1;
}
0108
0109 unsigned long blk_rq_timeout(unsigned long timeout)
0110 {
0111 unsigned long maxt;
0112
0113 maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
0114 if (time_after(timeout, maxt))
0115 timeout = maxt;
0116
0117 return timeout;
0118 }
0119
0120
0121
0122
0123
0124
0125
0126
0127
/**
 * blk_add_timer - start the timeout timer for a single request
 * @req:	request that is about to start running
 *
 * Records the request's absolute deadline and, if necessary, (re)arms
 * the per-queue timeout timer so it fires no later than roughly one
 * second after that deadline.
 */
void blk_add_timer(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long expiry;

	/*
	 * Fall back to the queue-wide default when the request carries
	 * no explicit timeout of its own.
	 */
	if (!req->timeout)
		req->timeout = q->rq_timeout;

	/* A (re)started request is no longer considered timed out. */
	req->rq_flags &= ~RQF_TIMED_OUT;

	expiry = jiffies + req->timeout;
	WRITE_ONCE(req->deadline, expiry);

	/*
	 * If the timer isn't already pending, or this request expires
	 * earlier than the currently programmed expiry, modify the
	 * timer. Push the expiry out by ~1s first so nearby requests
	 * share a firing.
	 */
	expiry = blk_rq_timeout(blk_round_jiffies(expiry));

	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
		unsigned long diff = q->timeout.expires - expiry;

		/*
		 * Due to timer slack grouping, the armed timer will often
		 * sit a little ahead of what was asked for. Apply some
		 * tolerance here (half a second) so we don't re-program
		 * the timer on every request just because "expires for
		 * value X" comes back as "X + something".
		 * (diff is only meaningful when the timer is pending;
		 * the short-circuit on !timer_pending() guards the
		 * garbage value in that case.)
		 */
		if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
			mod_timer(&q->timeout, expiry);
	}

}