/*
 * Functions related to generic timeout handling of requests.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fault-inject.h>

#include "blk.h"
#include "blk-mq.h"

#ifdef CONFIG_FAIL_IO_TIMEOUT

static DECLARE_FAULT_ATTR(fail_io_timeout);

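/*
 * "fail_io_timeout=" takes the usual fault-injection boot syntax,
 * <interval>,<probability>,<space>,<times>, parsed by setup_fault_attr().
 */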
static int __init setup_fail_io_timeout(char *str)
{
    return setup_fault_attr(&fail_io_timeout, str);
}
__setup("fail_io_timeout=", setup_fail_io_timeout);

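/*
 * blk_should_fake_timeout - check if this queue should fake a timeout
 *
 * Returns non-zero when QUEUE_FLAG_FAIL_IO is set on @q and the
 * fail_io_timeout fault attribute fires, so that a completion can be
 * dropped and the request handled by the timeout path instead.
 */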
int blk_should_fake_timeout(struct request_queue *q)
{
    if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
        return 0;

    return should_fail(&fail_io_timeout, 1);
}

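/*
 * Expose the fail_io_timeout fault attribute under debugfs so the
 * injection parameters can be tuned at runtime.
 */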
static int __init fail_io_timeout_debugfs(void)
{
    struct dentry *dir = fault_create_debugfs_attr("fail_io_timeout",
                        NULL, &fail_io_timeout);

    return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_io_timeout_debugfs);

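/*
 * part_timeout_show - sysfs handler reporting whether timeout fault
 * injection (QUEUE_FLAG_FAIL_IO) is enabled for the disk's queue.
 */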
ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
              char *buf)
{
    struct gendisk *disk = dev_to_disk(dev);
    int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);

    return sprintf(buf, "%d\n", set != 0);
}

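/*
 * part_timeout_store - sysfs handler: a non-zero value enables timeout
 * fault injection on the disk's queue, zero disables it.
 */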
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
{
    struct gendisk *disk = dev_to_disk(dev);
    int val;

    if (count) {
        struct request_queue *q = disk->queue;
        char *p = (char *) buf;

        val = simple_strtoul(p, &p, 10);
        spin_lock_irq(q->queue_lock);
        if (val)
            queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
        else
            queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
        spin_unlock_irq(q->queue_lock);
    }

    return count;
}

#endif /* CONFIG_FAIL_IO_TIMEOUT */

/*
 * blk_delete_timer - Delete/cancel timer for a given request.
 * @req:    request that we are canceling timer for
 */
void blk_delete_timer(struct request *req)
{
    list_del_init(&req->timeout_list);
}

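/*
 * blk_rq_timed_out - handle an expired request
 *
 * Invoke the queue's rq_timed_out_fn and act on its verdict: complete
 * the request, re-arm its timer, or leave it to the driver.
 */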
static void blk_rq_timed_out(struct request *req)
{
    struct request_queue *q = req->q;
    enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

    if (q->rq_timed_out_fn)
        ret = q->rq_timed_out_fn(req);
    switch (ret) {
    case BLK_EH_HANDLED:
        /* Can we use req->errors here? */
        __blk_complete_request(req);
        break;
    case BLK_EH_RESET_TIMER:
        blk_add_timer(req);
        blk_clear_rq_complete(req);
        break;
    case BLK_EH_NOT_HANDLED:
        /*
         * The LLD handles this for now, but in the future we could
         * send a request message to abort the command, and move more
         * of the generic SCSI EH code into the block layer.
         */
        break;
    default:
        printk(KERN_ERR "block: bad eh return: %d\n", ret);
        break;
    }
}

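/*
 * blk_rq_check_expired - called with the queue lock held from
 * blk_timeout_work(). If @rq's deadline has passed, take it off the
 * timeout list and time it out (unless completion raced with us);
 * otherwise track the earliest pending deadline in *next_timeout.
 */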
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
              unsigned int *next_set)
{
    if (time_after_eq(jiffies, rq->deadline)) {
        list_del_init(&rq->timeout_list);

        /*
         * Check if we raced with end io completion
         */
        if (!blk_mark_rq_complete(rq))
            blk_rq_timed_out(rq);
    } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
        *next_timeout = rq->deadline;
        *next_set = 1;
    }
}

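/*
 * blk_timeout_work - timeout work handler for non-mq request queues
 *
 * Scan q->timeout_list for expired requests and re-arm q->timeout for
 * the next pending deadline. blk-mq queues use their own handler.
 */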
void blk_timeout_work(struct work_struct *work)
{
    struct request_queue *q =
        container_of(work, struct request_queue, timeout_work);
    unsigned long flags, next = 0;
    struct request *rq, *tmp;
    int next_set = 0;

    if (blk_queue_enter(q, true))
        return;
    spin_lock_irqsave(q->queue_lock, flags);

    list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
        blk_rq_check_expired(rq, &next, &next_set);

    if (next_set)
        mod_timer(&q->timeout, round_jiffies_up(next));

    spin_unlock_irqrestore(q->queue_lock, flags);
    blk_queue_exit(q);
}

/**
 * blk_abort_request -- Request recovery for the specified command
 * @req:    pointer to the request of interest
 *
 * This function requests that the block layer start recovery for the
 * request by deleting the timer and calling the q's timeout function.
 * LLDDs that implement their own error recovery MAY ignore the timeout
 * event if they generated blk_abort_request. Must hold queue lock.
 */
void blk_abort_request(struct request *req)
{
    if (blk_mark_rq_complete(req))
        return;

    if (req->q->mq_ops) {
        blk_mq_rq_timed_out(req, false);
    } else {
        blk_delete_timer(req);
        blk_rq_timed_out(req);
    }
}
EXPORT_SYMBOL_GPL(blk_abort_request);

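/*
 * blk_rq_timeout - clamp an absolute expiry (in jiffies) so the queue
 * timer never expires more than BLK_MAX_TIMEOUT from now.
 */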
unsigned long blk_rq_timeout(unsigned long timeout)
{
    unsigned long maxt;

    maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
    if (time_after(timeout, maxt))
        timeout = maxt;

    return timeout;
}

/**
 * blk_add_timer - Start timeout timer for a single request
 * @req:    request that is about to start running.
 *
 * Notes:
 *    Each request has its own timer, and as it is added to the queue, we
 *    set up the timer. When the request completes, we cancel the timer.
 *    Queue lock must be held for the non-mq case; the mq case doesn't care.
 */
void blk_add_timer(struct request *req)
{
    struct request_queue *q = req->q;
    unsigned long expiry;

    /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
    if (!q->mq_ops && !q->rq_timed_out_fn)
        return;

    BUG_ON(!list_empty(&req->timeout_list));

    /*
     * Some LLDs, like scsi, peek at the timeout to prevent a
     * command from being retried forever.
     */
    if (!req->timeout)
        req->timeout = q->rq_timeout;

    req->deadline = jiffies + req->timeout;

    /*
     * Only the non-mq case needs to add the request to a protected list.
     * For the mq case we simply scan the tag map.
     */
    if (!q->mq_ops)
        list_add_tail(&req->timeout_list, &req->q->timeout_list);

    /*
     * If the timer isn't already pending or this timeout is earlier
     * than an existing one, modify the timer. Round up to the next
     * nearest second.
     */
    expiry = blk_rq_timeout(round_jiffies_up(req->deadline));

    if (!timer_pending(&q->timeout) ||
        time_before(expiry, q->timeout.expires)) {
        unsigned long diff = q->timeout.expires - expiry;

        /*
         * Due to the slack added to group timers, the timer will often
         * expire a little later than what we asked for. So apply some
         * tolerance here too, otherwise we keep modifying the timer
         * because the expires value for X will be X + something.
         */
        if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
            mod_timer(&q->timeout, expiry);
    }
}