0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Copyright (C) 2012 Fusion-io  All rights reserved.
0004  * Copyright (C) 2012 Intel Corp. All rights reserved.
0005  */
0006 
0007 #ifndef BTRFS_RAID56_H
0008 #define BTRFS_RAID56_H
0009 
0010 #include <linux/workqueue.h>
0011 #include "volumes.h"
0012 
/*
 * The kind of operation a raid bio is carrying out; stored in
 * btrfs_raid_bio::operation and used to decide how the rbio is processed.
 */
enum btrfs_rbio_ops {
    BTRFS_RBIO_WRITE,
    BTRFS_RBIO_READ_REBUILD,
    BTRFS_RBIO_PARITY_SCRUB,
    BTRFS_RBIO_REBUILD_MISSING,
};

0020 struct btrfs_raid_bio {
0021     struct btrfs_io_context *bioc;
0022 
0023     /*
0024      * While we're doing RMW on a stripe we put it into a hash table so we
0025      * can lock the stripe and merge more rbios into it.
0026      */
0027     struct list_head hash_list;
0028 
0029     /* LRU list for the stripe cache */
0030     struct list_head stripe_cache;
0031 
0032     /* For scheduling work in the helper threads */
0033     struct work_struct work;
0034 
0035     /*
0036      * bio_list and bio_list_lock are used to add more bios into the stripe
0037      * in hopes of avoiding the full RMW
0038      */
0039     struct bio_list bio_list;
0040     spinlock_t bio_list_lock;
0041 
0042     /*
0043      * Also protected by the bio_list_lock, the plug list is used by the
0044      * plugging code to collect partial bios while plugged.  The stripe
0045      * locking code also uses it to hand off the stripe lock to the next
0046      * pending IO.
0047      */
0048     struct list_head plug_list;
0049 
0050     /* Flags that tell us if it is safe to merge with this bio. */
0051     unsigned long flags;
0052 
0053     /*
0054      * Set if we're doing a parity rebuild for a read from higher up, which
0055      * is handled differently from a parity rebuild as part of RMW.
0056      */
0057     enum btrfs_rbio_ops operation;
0058 
0059     /* How many pages there are for the full stripe including P/Q */
0060     u16 nr_pages;
0061 
0062     /* How many sectors there are for the full stripe including P/Q */
0063     u16 nr_sectors;
0064 
0065     /* Number of data stripes (no p/q) */
0066     u8 nr_data;
0067 
0068     /* Numer of all stripes (including P/Q) */
0069     u8 real_stripes;
0070 
0071     /* How many pages there are for each stripe */
0072     u8 stripe_npages;
0073 
0074     /* How many sectors there are for each stripe */
0075     u8 stripe_nsectors;
0076 
0077     /* First bad stripe, -1 means no corruption */
0078     s8 faila;
0079 
0080     /* Second bad stripe (for RAID6 use) */
0081     s8 failb;
0082 
0083     /* Stripe number that we're scrubbing  */
0084     u8 scrubp;
0085 
0086     /*
0087      * Size of all the bios in the bio_list.  This helps us decide if the
0088      * rbio maps to a full stripe or not.
0089      */
0090     int bio_list_bytes;
0091 
0092     int generic_bio_cnt;
0093 
0094     refcount_t refs;
0095 
0096     atomic_t stripes_pending;
0097 
0098     atomic_t error;
0099 
0100     struct work_struct end_io_work;
0101 
0102     /* Bitmap to record which horizontal stripe has data */
0103     unsigned long dbitmap;
0104 
0105     /* Allocated with stripe_nsectors-many bits for finish_*() calls */
0106     unsigned long finish_pbitmap;
0107 
0108     /*
0109      * These are two arrays of pointers.  We allocate the rbio big enough
0110      * to hold them both and setup their locations when the rbio is
0111      * allocated.
0112      */
0113 
0114     /*
0115      * Pointers to pages that we allocated for reading/writing stripes
0116      * directly from the disk (including P/Q).
0117      */
0118     struct page **stripe_pages;
0119 
0120     /* Pointers to the sectors in the bio_list, for faster lookup */
0121     struct sector_ptr *bio_sectors;
0122 
0123     /*
0124      * For subpage support, we need to map each sector to above
0125      * stripe_pages.
0126      */
0127     struct sector_ptr *stripe_sectors;
0128 
0129     /* Allocated with real_stripes-many pointers for finish_*() calls */
0130     void **finish_pointers;
0131 };
0132 
0133 /*
0134  * For trace event usage only. Records useful debug info for each bio submitted
0135  * by RAID56 to each physical device.
0136  *
0137  * No matter signed or not, (-1) is always the one indicating we can not grab
0138  * the proper stripe number.
0139  */
0140 struct raid56_bio_trace_info {
0141     u64 devid;
0142 
0143     /* The offset inside the stripe. (<= STRIPE_LEN) */
0144     u32 offset;
0145 
0146     /*
0147      * Stripe number.
0148      * 0 is the first data stripe, and nr_data for P stripe,
0149      * nr_data + 1 for Q stripe.
0150      * >= real_stripes for
0151      */
0152     u8 stripe_nr;
0153 };
0154 
0155 static inline int nr_data_stripes(const struct map_lookup *map)
0156 {
0157     return map->num_stripes - btrfs_nr_parity_stripes(map->type);
0158 }
0159 
/* Sentinel logical "stripe numbers" used for the P and Q parity stripes. */
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

/* True when @x is one of the parity stripe sentinels above. */
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) ||     \
                 ((x) == RAID6_Q_STRIPE))

0166 struct btrfs_device;
0167 
0168 void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
0169                int mirror_num, bool generic_io);
0170 void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
0171 
0172 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
0173                 unsigned int pgoff, u64 logical);
0174 
0175 struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
0176                 struct btrfs_io_context *bioc,
0177                 struct btrfs_device *scrub_dev,
0178                 unsigned long *dbitmap, int stripe_nsectors);
0179 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
0180 
0181 struct btrfs_raid_bio *
0182 raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc);
0183 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
0184 
0185 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
0186 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
0187 
0188 #endif