// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

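/*
 * A "bbio" (struct bbio, defined in bcache.h) is a struct bio bundled with
 * the bkey describing where the IO goes plus a submission timestamp; all of
 * them come from the per-cache-set bio_meta mempool so metadata IO can make
 * forward progress even under memory pressure.
 */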
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
    struct bbio *b = container_of(bio, struct bbio, bio);

    mempool_free(b, &c->bio_meta);
}

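/*
 * Allocation uses GFP_NOIO and a mempool so the IO path can't deadlock by
 * recursing into reclaim-driven IO; the bio is initialized with enough
 * inline bio_vecs to cover a metadata bucket (meta_bucket_pages()).
 */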
struct bio *bch_bbio_alloc(struct cache_set *c)
{
    struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
    struct bio *bio = &b->bio;

    bio_init(bio, NULL, bio->bi_inline_vecs,
         meta_bucket_pages(&c->cache->sb), 0);

    return bio;
}

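/*
 * Submit a bbio whose key has already been filled in: pointer 0 of the key
 * gives the starting sector, and the IO always targets the (single) cache
 * device.  The submission time is recorded so bch_bbio_count_io_errors()
 * can later measure latency for the congestion heuristic.
 */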
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
    struct bbio *b = container_of(bio, struct bbio, bio);

    bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
    bio_set_dev(bio, c->cache->bdev);

    b->submit_time_us = local_clock_us();
    closure_bio_submit(c, bio, bio->bi_private);
}

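/*
 * A rough sketch of how callers typically pair these helpers (the endio
 * callback and closure names below are illustrative, not taken from this
 * file):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_end_io		= my_endio;	(usually ends up calling bch_bbio_endio())
 *	bio->bi_private		= cl;		(the closure that owns this IO)
 *	bio->bi_iter.bi_size	= ...;
 *	bch_bio_map(bio, data);
 *
 *	bch_submit_bbio(bio, c, &k->key, 0);
 *
 * bch_submit_bbio() copies one pointer out of the key into the bbio, and
 * __bch_submit_bbio() turns that into a device + sector and submits.
 */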
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
             struct bkey *k, unsigned int ptr)
{
    struct bbio *b = container_of(bio, struct bbio, bio);

    bch_bkey_copy_single_ptr(&b->key, k, ptr);
    __bch_submit_bbio(bio, c);
}

/* IO errors */
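/*
 * Every genuine error on the backing device bumps dc->io_errors; once that
 * count reaches dc->error_limit the cached device is failed via
 * bch_cached_dev_error().  Failed readahead bios are deliberately not
 * counted, per the comment below.
 */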
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
    unsigned int errors;

    WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

    /*
     * Read-ahead requests on a degrading and recovering md raid
     * (e.g. raid6) device might be failed immediately by the md
     * raid code, which is not a real hardware media failure. So
     * we shouldn't count failed REQ_RAHEAD bios toward dc->io_errors.
     */
    if (bio->bi_opf & REQ_RAHEAD) {
        pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
                    dc->bdev);
        return;
    }

    errors = atomic_add_return(1, &dc->io_errors);
    if (errors < dc->error_limit)
        pr_err("%pg: IO error on backing device, unrecoverable\n",
            dc->bdev);
    else
        bch_cached_dev_error(dc);
}

void bch_count_io_errors(struct cache *ca,
             blk_status_t error,
             int is_read,
             const char *m)
{
    /*
     * The halflife of an error is:
     * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
     */
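    /*
     * Worked through: each time io_count crosses error_decay ("refresh"),
     * io_errors is rescaled to 127/128 of its value, so after n refreshes
     * roughly errors * (127/128)^n remains.  Solving (127/128)^n = 1/2
     * gives n = log(1/2) / log(127/128) ~= 88.4, hence the ~88 * refresh
     * half-life quoted above.  Only the CPU that wins the io_count
     * cmpxchg below performs the rescale, so each crossing decays the
     * count exactly once.
     */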

    if (ca->set->error_decay) {
        unsigned int count = atomic_inc_return(&ca->io_count);

        while (count > ca->set->error_decay) {
            unsigned int errors;
            unsigned int old = count;
            unsigned int new = count - ca->set->error_decay;

            /*
             * First we subtract refresh from count; each time we
             * successfully do so, we rescale the errors once:
             */

            count = atomic_cmpxchg(&ca->io_count, old, new);

            if (count == old) {
                count = new;

                errors = atomic_read(&ca->io_errors);
                do {
                    old = errors;
                    new = ((uint64_t) errors * 127) / 128;
                    errors = atomic_cmpxchg(&ca->io_errors,
                                old, new);
                } while (old != errors);
            }
        }
    }

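    /*
     * io_errors is a fixed-point count, shifted up by IO_ERROR_SHIFT, so
     * the 127/128 decay above retains fractional precision instead of
     * rounding small counts straight to zero; shift it back down before
     * comparing against error_limit.
     */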
    if (error) {
        unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                            &ca->io_errors);
        errors >>= IO_ERROR_SHIFT;

        if (errors < ca->set->error_limit)
            pr_err("%pg: IO error on %s%s\n",
                   ca->bdev, m,
                   is_read ? ", recovering." : ".");
        else
            bch_cache_set_error(ca->set,
                        "%pg: too many IO errors %s\n",
                        ca->bdev, m);
    }
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                  blk_status_t error, const char *m)
{
    struct bbio *b = container_of(bio, struct bbio, bio);
    struct cache *ca = c->cache;
    int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

    unsigned int threshold = op_is_write(bio_op(bio))
        ? c->congested_write_threshold_us
        : c->congested_read_threshold_us;

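    /*
     * Congestion tracking: an IO that takes longer than the configured
     * read/write threshold drives c->congested negative by roughly its
     * latency in ms (us / 1024 is a cheap stand-in for dividing by 1000),
     * clamped so the counter never drops below -CONGESTED_MAX; IOs that
     * complete in time slowly walk it back up toward zero.  The request
     * path consults this counter when deciding whether to bypass the
     * cache under congestion.
     */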
    if (threshold) {
        unsigned int t = local_clock_us();
        int us = t - b->submit_time_us;
        int congested = atomic_read(&c->congested);

        if (us > (int) threshold) {
            int ms = us / 1024;

            c->congested_last_us = t;

            ms = min(ms, CONGESTED_MAX + congested);
            atomic_sub(ms, &c->congested);
        } else if (congested < 0)
            atomic_inc(&c->congested);
    }

    bch_count_io_errors(ca, error, is_read, m);
}

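/*
 * Common completion path for bbios: account the error and latency, drop the
 * bio reference, and put the closure reference taken when the bio was
 * submitted (via closure_bio_submit() in __bch_submit_bbio()).
 */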
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
            blk_status_t error, const char *m)
{
    struct closure *cl = bio->bi_private;

    bch_bbio_count_io_errors(c, bio, error, m);
    bio_put(bio);
    closure_put(cl);
}