// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 *  Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL  ((xfs_daddr_t) (-1LL))

#define XBF_READ     (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE    (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD   (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT    (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC    (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE     (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE    (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL   (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES  (1u << 16)/* inode buffer */
#define _XBF_DQUOTS  (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES   (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM    (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q    (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_INCORE   (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK  (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED     (1u << 31)/* do not map the buffer */


typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
    { XBF_READ,     "READ" }, \
    { XBF_WRITE,        "WRITE" }, \
    { XBF_READ_AHEAD,   "READ_AHEAD" }, \
    { XBF_NO_IOACCT,    "NO_IOACCT" }, \
    { XBF_ASYNC,        "ASYNC" }, \
    { XBF_DONE,     "DONE" }, \
    { XBF_STALE,        "STALE" }, \
    { XBF_WRITE_FAIL,   "WRITE_FAIL" }, \
    { _XBF_INODES,      "INODES" }, \
    { _XBF_DQUOTS,      "DQUOTS" }, \
    { _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \
    { _XBF_PAGES,       "PAGES" }, \
    { _XBF_KMEM,        "KMEM" }, \
    { _XBF_DELWRI_Q,    "DELWRI_Q" }, \
    /* The following interface flags should never be set */ \
    { XBF_INCORE,       "INCORE" }, \
    { XBF_TRYLOCK,      "TRYLOCK" }, \
    { XBF_UNMAPPED,     "UNMAPPED" }

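/*
 * XFS_BUF_FLAGS is a flag-name table in the form consumed by the tracing
 * code's __print_flags(). An illustrative sketch of its use (the real
 * consumers live in fs/xfs/xfs_trace.h, not in this header):
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 *
 * which renders a b_flags value as e.g. "READ|ASYNC|DONE" in trace output.
 */
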
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE   (1 << 0)   /* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT     (1 << 1)   /* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The second is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
    dev_t           bt_dev;
    struct block_device *bt_bdev;
    struct dax_device   *bt_daxdev;
    u64         bt_dax_part_off;
    struct xfs_mount    *bt_mount;
    unsigned int        bt_meta_sectorsize;
    size_t          bt_meta_sectormask;
    size_t          bt_logical_sectorsize;
    size_t          bt_logical_sectormask;

    /* LRU control structures */
    struct shrinker     bt_shrinker;
    struct list_lru     bt_lru;

    struct percpu_counter   bt_io_count;
    struct ratelimit_state  bt_ioerror_rl;
} xfs_buftarg_t;

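/*
 * A sketch of the kind of alignment check the logical sector mask enables
 * (illustrative only; the real checks live in the XFS direct I/O paths):
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 *
 * i.e. a direct I/O request is rejected unless both its offset and length
 * are multiples of the device's logical sector size.
 */
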
#define XB_PAGES    2

struct xfs_buf_map {
    xfs_daddr_t     bm_bn;  /* block number for I/O */
    int         bm_len; /* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
    struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

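/*
 * Discontiguous ("compound") buffers pass an array of maps instead of using
 * DEFINE_SINGLE_BUF_MAP. A minimal sketch, assuming two hypothetical extents
 * daddr1/len1 and daddr2/len2 (illustrative only):
 *
 *	struct xfs_buf_map map[] = {
 *		{ .bm_bn = daddr1, .bm_len = len1 },
 *		{ .bm_bn = daddr2, .bm_len = len2 },
 *	};
 *	error = xfs_buf_read_map(target, map, 2, 0, &bp, ops, __this_address);
 */
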
struct xfs_buf_ops {
    char *name;
    union {
        __be32 magic[2];    /* v4 and v5 on disk magic values */
        __be16 magic16[2];  /* v4 and v5 on disk magic values */
    };
    void (*verify_read)(struct xfs_buf *);
    void (*verify_write)(struct xfs_buf *);
    xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

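/*
 * A minimal sketch of a read verifier and its ops table, assuming a
 * hypothetical block type with magic MYBLOCK_MAGIC and a CRC field at
 * MYBLOCK_CRC_OFF (real examples are the *_buf_ops tables in libxfs,
 * e.g. xfs_sb_buf_ops):
 *
 *	static void myblock_verify_read(struct xfs_buf *bp)
 *	{
 *		if (!xfs_buf_verify_cksum(bp, MYBLOCK_CRC_OFF))
 *			xfs_verifier_error(bp, -EFSBADCRC, __this_address);
 *	}
 *
 *	const struct xfs_buf_ops myblock_buf_ops = {
 *		.name = "myblock",
 *		.magic = { cpu_to_be32(MYBLOCK_MAGIC),
 *			   cpu_to_be32(MYBLOCK_MAGIC) },
 *		.verify_read = myblock_verify_read,
 *	};
 */
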
struct xfs_buf {
    /*
     * The first cacheline holds all the fields needed for an uncontended
     * cache hit to be fully processed. The semaphore straddles the
     * cacheline boundary, but the counter and lock sit on the first
     * cacheline, which is the only bit that is touched if we hit the
     * semaphore fast-path on locking.
     */
    struct rhash_head   b_rhash_head;   /* pag buffer hash node */

    xfs_daddr_t     b_rhash_key;    /* buffer cache index */
    int         b_length;   /* size of buffer in BBs */
    atomic_t        b_hold;     /* reference count */
    atomic_t        b_lru_ref;  /* lru reclaim ref count */
    xfs_buf_flags_t     b_flags;    /* status flags */
    struct semaphore    b_sema;     /* semaphore for lockables */

    /*
     * Concurrent access to b_lru and b_state is protected by
     * bt_lru_lock and not by b_sema.
     */
    struct list_head    b_lru;      /* lru list */
    spinlock_t      b_lock;     /* internal state lock */
    unsigned int        b_state;    /* internal state flags */
    int         b_io_error; /* internal IO error state */
    wait_queue_head_t   b_waiters;  /* unpin waiters */
    struct list_head    b_list;
    struct xfs_perag    *b_pag;     /* contains rbtree root */
    struct xfs_mount    *b_mount;
    struct xfs_buftarg  *b_target;  /* buffer target (device) */
    void            *b_addr;    /* virtual address of buffer */
    struct work_struct  b_ioend_work;
    struct completion   b_iowait;   /* queue for I/O waiters */
    struct xfs_buf_log_item *b_log_item;
    struct list_head    b_li_list;  /* Log items list head */
    struct xfs_trans    *b_transp;
    struct page     **b_pages;  /* array of page pointers */
    struct page     *b_page_array[XB_PAGES]; /* inline pages */
    struct xfs_buf_map  *b_maps;    /* compound buffer map */
    struct xfs_buf_map  __b_map;    /* inline compound buffer map */
    int         b_map_count;
    atomic_t        b_pin_count;    /* pin count */
    atomic_t        b_io_remaining; /* #outstanding I/O requests */
    unsigned int        b_page_count;   /* size of page array */
    unsigned int        b_offset;   /* page offset of b_addr,
                           only for _XBF_KMEM buffers */
    int         b_error;    /* error code on I/O */

    /*
     * Async write failure retry count. It is initialised to zero on the
     * first failure; if it then exceeds the configured maximum without an
     * intervening success, the write is considered permanently failed and
     * the iodone handler will take appropriate action.
     *
     * For retry timeouts, we record the time (in jiffies) of the first
     * failure. This means that we can change the retry timeout for buffers
     * already under I/O and thus avoid getting stuck in a retry loop with
     * a long timeout.
     *
     * last_error is used to ensure that we are getting repeated errors, not
     * different errors. e.g. a block device might change ENOSPC to EIO when
     * a failure timeout occurs, so we want to re-initialise the error
     * retry behaviour appropriately when that happens. (A simplified
     * sketch of this logic follows the struct definition.)
     */
    int         b_retries;
    unsigned long       b_first_retry_time; /* in jiffies */
    int         b_last_error;

    const struct xfs_buf_ops    *b_ops;
    struct rcu_head     b_rcu;
};

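/*
 * A simplified sketch of the retry-state handling described in struct
 * xfs_buf above (condensed from the buffer error handling in
 * fs/xfs/xfs_buf.c; max_retries and retry_timeout stand in for the values
 * taken from the mount's error configuration):
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		bp->b_retries = 0;
 *		bp->b_last_error = bp->b_error;
 *		bp->b_first_retry_time = jiffies;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies,
 *			      bp->b_first_retry_time + retry_timeout)) {
 *		(fail the buffer permanently)
 *	}
 */
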
/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
        int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
        int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
        const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
               struct xfs_buf_map *map, int nmaps,
               const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
    struct xfs_buftarg  *target,
    xfs_daddr_t     blkno,
    size_t          numblks,
    xfs_buf_flags_t     flags,
    struct xfs_buf      **bpp)
{
    DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

    return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
    struct xfs_buftarg  *target,
    xfs_daddr_t     blkno,
    size_t          numblks,
    struct xfs_buf      **bpp)
{
    DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

    return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
    struct xfs_buftarg  *target,
    xfs_daddr_t     blkno,
    size_t          numblks,
    xfs_buf_flags_t     flags,
    struct xfs_buf      **bpp,
    const struct xfs_buf_ops *ops)
{
    DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

    return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
            __builtin_return_address(0));
}

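/*
 * Typical use of the single-extent read wrapper; a minimal sketch with
 * error handling elided (mp is assumed to be the relevant xfs_mount, and
 * xfs_sb_buf_ops is merely one example of a verifier):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			     &xfs_sb_buf_ops);
 *	if (error)
 *		return error;
 *	(examine bp->b_addr)
 *	xfs_buf_relse(bp);
 */
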
static inline void
xfs_buf_readahead(
    struct xfs_buftarg  *target,
    xfs_daddr_t     blkno,
    size_t          numblks,
    const struct xfs_buf_ops *ops)
{
    DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
    return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
        xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
        size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
        const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
    ((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
    xfs_buf_unlock(bp);
    xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
        xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

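/*
 * The delwri routines operate on a caller-owned list head. A minimal sketch
 * of the usual pattern (illustrative only):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	(for each buffer to be written)
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() waits for the queued I/O to complete, while the
 * _nowait() variant returns without waiting.
 */
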
static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
    return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
    if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
        return;
    atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
    return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
    return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
                cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
    xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
             cksum_offset);
}

/*
 *  Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
        struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)    block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)   bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif  /* __XFS_BUF_H__ */