// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

struct kmem_cache   *xfs_buf_item_cache;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
    return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
    struct xfs_log_iovec        *iovec)
{
    struct xfs_buf_log_format   *blfp = iovec->i_addr;
    char                *bmp_end;
    char                *item_end;

    if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
        return false;

    item_end = (char *)iovec->i_addr + iovec->i_len;
    bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
    return bmp_end <= item_end;
}

static inline int
xfs_buf_log_format_size(
    struct xfs_buf_log_format *blfp)
{
    return offsetof(struct xfs_buf_log_format, blf_data_map) +
            (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
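
/*
 * Illustrative example (editor's addition, not part of the XFS sources):
 * the helper above is plain offsetof() arithmetic. A format describing a
 * bitmap of two 32-bit words occupies the fixed header up to blf_data_map
 * plus two map words:
 *
 *	struct xfs_buf_log_format blf = { .blf_map_size = 2 };
 *	int size = offsetof(struct xfs_buf_log_format, blf_data_map) +
 *		   2 * sizeof(blf.blf_data_map[0]);
 *
 * xfs_buf_log_check_iovec() above is the recovery-side counterpart: it
 * first rejects any iovec too short to hold the fixed header, then checks
 * that blf_map_size bitmap words also fit inside i_len.
 */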

static inline bool
xfs_buf_item_straddle(
    struct xfs_buf      *bp,
    uint            offset,
    int         first_bit,
    int         nbits)
{
    void            *first, *last;

    first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
    last = xfs_buf_offset(bp,
            offset + ((first_bit + nbits) << XFS_BLF_SHIFT));

    if (last - first != nbits * XFS_BLF_CHUNK)
        return true;
    return false;
}
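
/*
 * Worked example for the straddle test (editor's note): chunks are
 * XFS_BLF_CHUNK (128) bytes, so bit N of the dirty bitmap covers byte
 * offset N << XFS_BLF_SHIFT. For a run that is contiguous in memory, the
 * two xfs_buf_offset() results differ by exactly nbits * XFS_BLF_CHUNK.
 * Take first_bit = 30 and nbits = 4 on an unmapped buffer built from
 * separately allocated pages: the run covers byte offsets 3840 to 4352,
 * crossing the page boundary at 4096, so roughly:
 *
 *	first = page0 + 3840;	(offset 3840 lives in page 0)
 *	last  = page1 + 256;	(offset 4352 lives in page 1)
 *
 * and last - first is almost certainly not 4 * 128, so the run straddles
 * and must be logged chunk by chunk.
 */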

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
    struct xfs_buf_log_item     *bip,
    struct xfs_buf_log_format   *blfp,
    uint                offset,
    int             *nvecs,
    int             *nbytes)
{
    struct xfs_buf          *bp = bip->bli_buf;
    int             first_bit;
    int             nbits;
    int             next_bit;
    int             last_bit;

    first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
    if (first_bit == -1)
        return;

    (*nvecs)++;
    *nbytes += xfs_buf_log_format_size(blfp);

    do {
        nbits = xfs_contig_bits(blfp->blf_data_map,
                    blfp->blf_map_size, first_bit);
        ASSERT(nbits > 0);

        /*
         * Straddling a page is rare because we don't log contiguous
         * chunks of unmapped buffers anywhere.
         */
        if (nbits > 1 &&
            xfs_buf_item_straddle(bp, offset, first_bit, nbits))
            goto slow_scan;

        (*nvecs)++;
        *nbytes += nbits * XFS_BLF_CHUNK;

        /*
         * This takes the bit number to start looking from and
         * returns the next set bit from there.  It returns -1
         * if there are no more bits set or the start bit is
         * beyond the end of the bitmap.
         */
        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                    (uint)first_bit + nbits + 1);
    } while (first_bit != -1);

    return;

slow_scan:
    /* Count the first bit we jumped out of the above loop from */
    (*nvecs)++;
    *nbytes += XFS_BLF_CHUNK;
    last_bit = first_bit;
    while (last_bit != -1) {
        /*
         * This takes the bit number to start looking from and
         * returns the next set bit from there.  It returns -1
         * if there are no more bits set or the start bit is
         * beyond the end of the bitmap.
         */
        next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                    last_bit + 1);
        /*
         * If we run out of bits, leave the loop,
         * else if we find a new set of bits bump the number of vecs,
         * else keep scanning the current set of bits.
         */
        if (next_bit == -1) {
            break;
        } else if (next_bit != last_bit + 1 ||
                   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
            last_bit = next_bit;
            first_bit = next_bit;
            (*nvecs)++;
            nbits = 1;
        } else {
            last_bit++;
            nbits++;
        }
        *nbytes += XFS_BLF_CHUNK;
    }
}
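
/*
 * Editor's sketch of the fast path above (not from the XFS sources): one
 * iovec per contiguous run of set bits, plus one for the format header.
 * With a single 32-bit map word of 0x73 (bits 0-1 and 4-6 set) the loop
 * finds two runs, so the segment needs 1 + 2 = 3 iovecs covering
 * (2 + 3) * XFS_BLF_CHUNK = 640 bytes of data. A self-contained analogue:
 *
 *	unsigned int map = 0x73, nvecs = 1, nbytes = 0;
 *	int bit = 0;
 *
 *	while (bit < 32) {
 *		if (!(map & (1U << bit))) {
 *			bit++;
 *			continue;
 *		}
 *		int run = 0;
 *		while (bit + run < 32 && (map & (1U << (bit + run))))
 *			run++;
 *		nvecs++;
 *		nbytes += run * 128;	(XFS_BLF_CHUNK)
 *		bit += run + 1;		(bit after a run is known clear)
 *	}
 */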

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
    struct xfs_log_item *lip,
    int         *nvecs,
    int         *nbytes)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);
    struct xfs_buf      *bp = bip->bli_buf;
    int         i;
    int         bytes;
    uint            offset = 0;

    ASSERT(atomic_read(&bip->bli_refcount) > 0);
    if (bip->bli_flags & XFS_BLI_STALE) {
        /*
         * The buffer is stale, so all we need to log is the buf log
         * format structure with the cancel flag in it as we are never
         * going to replay the changes tracked in the log item.
         */
        trace_xfs_buf_item_size_stale(bip);
        ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
        *nvecs += bip->bli_format_count;
        for (i = 0; i < bip->bli_format_count; i++) {
            *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
        }
        return;
    }

    ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

    if (bip->bli_flags & XFS_BLI_ORDERED) {
        /*
         * The buffer has been logged just to order it. It is not being
         * included in the transaction commit, so no vectors are used at
         * all.
         */
        trace_xfs_buf_item_size_ordered(bip);
        *nvecs = XFS_LOG_VEC_ORDERED;
        return;
    }

    /*
     * The vector count is based on the number of buffer vectors we have
     * dirty bits in. This will only be greater than one when we have a
     * compound buffer with more than one segment dirty. Hence for compound
     * buffers we need to track which segment the dirty bits correspond to,
     * and when we move from one segment to the next increment the vector
     * count for the extra buf log format structure that will need to be
     * written.
     */
    bytes = 0;
    for (i = 0; i < bip->bli_format_count; i++) {
        xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
                      nvecs, &bytes);
        offset += BBTOB(bp->b_maps[i].bm_len);
    }

    /*
     * Round up the buffer size required to minimise the number of memory
     * allocations that need to be done as this item grows when relogged by
     * repeated modifications.
     */
    *nbytes = round_up(bytes, 512);
    trace_xfs_buf_item_size(bip);
}
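
/*
 * Editor's note on the rounding above: if the loop sums to, say, 664
 * bytes of format headers plus dirty chunks, the item reports
 * round_up(664, 512) = 1024 bytes. A later relog that dirties a few more
 * chunks then still fits the previously sized allocation instead of
 * forcing a reallocation.
 */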

static inline void
xfs_buf_item_copy_iovec(
    struct xfs_log_vec  *lv,
    struct xfs_log_iovec    **vecp,
    struct xfs_buf      *bp,
    uint            offset,
    int         first_bit,
    uint            nbits)
{
    offset += first_bit * XFS_BLF_CHUNK;
    xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
            xfs_buf_offset(bp, offset),
            nbits * XFS_BLF_CHUNK);
}

static void
xfs_buf_item_format_segment(
    struct xfs_buf_log_item *bip,
    struct xfs_log_vec  *lv,
    struct xfs_log_iovec    **vecp,
    uint            offset,
    struct xfs_buf_log_format *blfp)
{
    struct xfs_buf      *bp = bip->bli_buf;
    uint            base_size;
    int         first_bit;
    int         last_bit;
    int         next_bit;
    uint            nbits;

    /* copy the flags across from the base format item */
    blfp->blf_flags = bip->__bli_format.blf_flags;

    /*
     * Base size is the actual size of the ondisk structure - it reflects
     * the actual size of the dirty bitmap rather than the size of the in
     * memory structure.
     */
    base_size = xfs_buf_log_format_size(blfp);

    first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
    if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
        /*
         * If the map is not dirty in the transaction, mark
         * the size as zero and do not advance the vector pointer.
         */
        return;
    }

    blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
    blfp->blf_size = 1;

    if (bip->bli_flags & XFS_BLI_STALE) {
        /*
         * The buffer is stale, so all we need to log
         * is the buf log format structure with the
         * cancel flag in it.
         */
        trace_xfs_buf_item_format_stale(bip);
        ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
        return;
    }

    /*
     * Fill in an iovec for each set of contiguous chunks.
     */
    do {
        ASSERT(first_bit >= 0);
        nbits = xfs_contig_bits(blfp->blf_data_map,
                    blfp->blf_map_size, first_bit);
        ASSERT(nbits > 0);

        /*
         * Straddling a page is rare because we don't log contiguous
         * chunks of unmapped buffers anywhere.
         */
        if (nbits > 1 &&
            xfs_buf_item_straddle(bp, offset, first_bit, nbits))
            goto slow_scan;

        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                    first_bit, nbits);
        blfp->blf_size++;

        /*
         * This takes the bit number to start looking from and
         * returns the next set bit from there.  It returns -1
         * if there are no more bits set or the start bit is
         * beyond the end of the bitmap.
         */
        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                    (uint)first_bit + nbits + 1);
    } while (first_bit != -1);

    return;

slow_scan:
    ASSERT(bp->b_addr == NULL);
    last_bit = first_bit;
    nbits = 1;
    for (;;) {
        /*
         * This takes the bit number to start looking from and
         * returns the next set bit from there.  It returns -1
         * if there are no more bits set or the start bit is
         * beyond the end of the bitmap.
         */
        next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                    (uint)last_bit + 1);
        /*
         * If we run out of bits fill in the last iovec and get out of
         * the loop.  Else if we start a new set of bits then fill in
         * the iovec for the series we were looking at and start
         * counting the bits in the new one.  Else we're still in the
         * same set of bits so just keep counting and scanning.
         */
        if (next_bit == -1) {
            xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                        first_bit, nbits);
            blfp->blf_size++;
            break;
        } else if (next_bit != last_bit + 1 ||
                   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
            xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                        first_bit, nbits);
            blfp->blf_size++;
            first_bit = next_bit;
            last_bit = next_bit;
            nbits = 1;
        } else {
            last_bit++;
            nbits++;
        }
    }
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
    struct xfs_log_item *lip,
    struct xfs_log_vec  *lv)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);
    struct xfs_buf      *bp = bip->bli_buf;
    struct xfs_log_iovec    *vecp = NULL;
    uint            offset = 0;
    int         i;

    ASSERT(atomic_read(&bip->bli_refcount) > 0);
    ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
           (bip->bli_flags & XFS_BLI_STALE));
    ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
           (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
            && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
    ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
           (bip->bli_flags & XFS_BLI_STALE));

    /*
     * If it is an inode buffer, transfer the in-memory state to the
     * format flags and clear the in-memory state.
     *
     * For buffer based inode allocation, we do not transfer
     * this state if the inode buffer allocation has not yet been committed
     * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
     * correct replay of the inode allocation.
     *
     * For icreate item based inode allocation, the buffers aren't written
     * to the journal during allocation, and hence we should always tag the
     * buffer as an inode buffer so that the correct unlinked list replay
     * occurs during recovery.
     */
    if (bip->bli_flags & XFS_BLI_INODE_BUF) {
        if (xfs_has_v3inodes(lip->li_log->l_mp) ||
            !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
              xfs_log_item_in_current_chkpt(lip)))
            bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
        bip->bli_flags &= ~XFS_BLI_INODE_BUF;
    }

    for (i = 0; i < bip->bli_format_count; i++) {
        xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                        &bip->bli_formats[i]);
        offset += BBTOB(bp->b_maps[i].bm_len);
    }

    /*
     * Check to make sure everything is consistent.
     */
    trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
    struct xfs_log_item *lip)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);

    ASSERT(atomic_read(&bip->bli_refcount) > 0);
    ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
           (bip->bli_flags & XFS_BLI_ORDERED) ||
           (bip->bli_flags & XFS_BLI_STALE));

    trace_xfs_buf_item_pin(bip);

    atomic_inc(&bip->bli_refcount);
    atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 */
STATIC void
xfs_buf_item_unpin(
    struct xfs_log_item *lip,
    int         remove)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);
    struct xfs_buf      *bp = bip->bli_buf;
    int         stale = bip->bli_flags & XFS_BLI_STALE;
    int         freed;

    ASSERT(bp->b_log_item == bip);
    ASSERT(atomic_read(&bip->bli_refcount) > 0);

    trace_xfs_buf_item_unpin(bip);

    /*
     * Drop the bli ref associated with the pin and grab the hold required
     * for the I/O simulation failure in the abort case. We have to do this
     * before the pin count drops because the AIL doesn't acquire a bli
     * reference. Therefore if the refcount drops to zero, the bli could
     * still be AIL resident and the buffer submitted for I/O (and freed on
     * completion) at any point before we return. This can be removed once
     * the AIL properly holds a reference on the bli.
     */
    freed = atomic_dec_and_test(&bip->bli_refcount);
    if (freed && !stale && remove)
        xfs_buf_hold(bp);
    if (atomic_dec_and_test(&bp->b_pin_count))
        wake_up_all(&bp->b_waiters);

    /* nothing to do but drop the pin count if the bli is active */
    if (!freed)
        return;

    if (stale) {
        ASSERT(bip->bli_flags & XFS_BLI_STALE);
        ASSERT(xfs_buf_islocked(bp));
        ASSERT(bp->b_flags & XBF_STALE);
        ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
        ASSERT(list_empty(&lip->li_trans));
        ASSERT(!bp->b_transp);

        trace_xfs_buf_item_unpin_stale(bip);

        /*
         * If we get called here because of an IO error, we may or may
         * not have the item on the AIL. xfs_trans_ail_delete() will
         * take care of that situation. xfs_trans_ail_delete() drops
         * the AIL lock.
         */
        if (bip->bli_flags & XFS_BLI_STALE_INODE) {
            xfs_buf_item_done(bp);
            xfs_buf_inode_iodone(bp);
            ASSERT(list_empty(&bp->b_li_list));
        } else {
            xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
            xfs_buf_item_relse(bp);
            ASSERT(bp->b_log_item == NULL);
        }
        xfs_buf_relse(bp);
    } else if (remove) {
        /*
         * The buffer must be locked and held by the caller to simulate
         * an async I/O failure. We acquired the hold for this case
         * before the buffer was unpinned.
         */
        xfs_buf_lock(bp);
        bp->b_flags |= XBF_ASYNC;
        xfs_buf_ioend_fail(bp);
    }
}

STATIC uint
xfs_buf_item_push(
    struct xfs_log_item *lip,
    struct list_head    *buffer_list)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);
    struct xfs_buf      *bp = bip->bli_buf;
    uint            rval = XFS_ITEM_SUCCESS;

    if (xfs_buf_ispinned(bp))
        return XFS_ITEM_PINNED;
    if (!xfs_buf_trylock(bp)) {
        /*
         * If we have just raced with a buffer being pinned and it has
         * been marked stale, we could end up stalling until someone else
         * issues a log force to unpin the stale buffer. Check for the
         * race condition here so xfsaild recognizes the buffer is pinned
         * and queues a log force to move it along.
         */
        if (xfs_buf_ispinned(bp))
            return XFS_ITEM_PINNED;
        return XFS_ITEM_LOCKED;
    }

    ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

    trace_xfs_buf_item_push(bip);

    /* has a previous flush failed due to IO errors? */
    if (bp->b_flags & XBF_WRITE_FAIL) {
        xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
    "Failing async write on buffer block 0x%llx. Retrying async write.",
                      (long long)xfs_buf_daddr(bp));
    }

    if (!xfs_buf_delwri_queue(bp, buffer_list))
        rval = XFS_ITEM_FLUSHING;
    xfs_buf_unlock(bp);
    return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
    struct xfs_buf_log_item *bip)
{
    struct xfs_log_item *lip = &bip->bli_item;
    bool            aborted;
    bool            dirty;

    /* drop the bli ref and return if it wasn't the last one */
    if (!atomic_dec_and_test(&bip->bli_refcount))
        return false;

    /*
     * We dropped the last ref and must free the item if clean or aborted.
     * If the bli is dirty and non-aborted, the buffer was clean in the
     * transaction but still awaiting writeback from previous changes. In
     * that case, the bli is freed on buffer writeback completion.
     */
    aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
            xlog_is_shutdown(lip->li_log);
    dirty = bip->bli_flags & XFS_BLI_DIRTY;
    if (dirty && !aborted)
        return false;

    /*
     * The bli is aborted or clean. An aborted item may be in the AIL
     * regardless of dirty state.  For example, consider an aborted
     * transaction that invalidated a dirty bli and cleared the dirty
     * state.
     */
    if (aborted)
        xfs_trans_ail_delete(lip, 0);
    xfs_buf_item_relse(bip->bli_buf);
    return true;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
    struct xfs_log_item *lip)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);
    struct xfs_buf      *bp = bip->bli_buf;
    bool            released;
    bool            hold = bip->bli_flags & XFS_BLI_HOLD;
    bool            stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
    bool            ordered = bip->bli_flags & XFS_BLI_ORDERED;
    bool            dirty = bip->bli_flags & XFS_BLI_DIRTY;
    bool            aborted = test_bit(XFS_LI_ABORTED,
                           &lip->li_flags);
#endif

    trace_xfs_buf_item_release(bip);

    /*
     * The bli dirty state should match whether the blf has logged segments
     * except for ordered buffers, where only the bli should be dirty.
     */
    ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
           (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
    ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

    /*
     * Clear the buffer's association with this transaction and
     * per-transaction state from the bli, which has been copied above.
     */
    bp->b_transp = NULL;
    bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

    /*
     * Unref the item and unlock the buffer unless held or stale. Stale
     * buffers remain locked until final unpin unless the bli is freed by
     * the unref call. The latter implies shutdown because buffer
     * invalidation dirties the bli and transaction.
     */
    released = xfs_buf_item_put(bip);
    if (hold || (stale && !released))
        return;
    ASSERT(!stale || aborted);
    xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
    struct xfs_log_item *lip,
    xfs_csn_t       seq)
{
    return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
    struct xfs_log_item *lip,
    xfs_lsn_t       lsn)
{
    struct xfs_buf_log_item *bip = BUF_ITEM(lip);

    trace_xfs_buf_item_committed(bip);

    if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
        return lip->li_lsn;
    return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
    .iop_size   = xfs_buf_item_size,
    .iop_format = xfs_buf_item_format,
    .iop_pin    = xfs_buf_item_pin,
    .iop_unpin  = xfs_buf_item_unpin,
    .iop_release    = xfs_buf_item_release,
    .iop_committing = xfs_buf_item_committing,
    .iop_committed  = xfs_buf_item_committed,
    .iop_push   = xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
    struct xfs_buf_log_item *bip,
    int         count)
{
    ASSERT(bip->bli_formats == NULL);
    bip->bli_format_count = count;

    if (count == 1) {
        bip->bli_formats = &bip->__bli_format;
        return;
    }

    bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                0);
}

STATIC void
xfs_buf_item_free_format(
    struct xfs_buf_log_item *bip)
{
    if (bip->bli_formats != &bip->__bli_format) {
        kmem_free(bip->bli_formats);
        bip->bli_formats = NULL;
    }
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
    struct xfs_buf  *bp,
    struct xfs_mount *mp)
{
    struct xfs_buf_log_item *bip = bp->b_log_item;
    int         chunks;
    int         map_size;
    int         i;

    /*
     * Check to see if there is already a buf log item for
     * this buffer. If we do already have one, there is
     * nothing to do here so return.
     */
    ASSERT(bp->b_mount == mp);
    if (bip) {
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!bp->b_transp);
        ASSERT(bip->bli_buf == bp);
        return 0;
    }

    bip = kmem_cache_zalloc(xfs_buf_item_cache, GFP_KERNEL | __GFP_NOFAIL);
    xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
    bip->bli_buf = bp;

    /*
     * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
     * can be divided into. Make sure not to truncate any pieces.
     * map_size is the size of the bitmap needed to describe the
     * chunks of the buffer.
     *
     * Discontiguous buffer support follows the layout of the underlying
     * buffer. This makes the implementation as simple as possible.
     */
    xfs_buf_item_get_format(bip, bp->b_map_count);

    for (i = 0; i < bip->bli_format_count; i++) {
        chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                      XFS_BLF_CHUNK);
        map_size = DIV_ROUND_UP(chunks, NBWORD);

        if (map_size > XFS_BLF_DATAMAP_SIZE) {
            kmem_cache_free(xfs_buf_item_cache, bip);
            xfs_err(mp,
    "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
                    map_size,
                    BBTOB(bp->b_maps[i].bm_len));
            return -EFSCORRUPTED;
        }

        bip->bli_formats[i].blf_type = XFS_LI_BUF;
        bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
        bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
        bip->bli_formats[i].blf_map_size = map_size;
    }

    bp->b_log_item = bip;
    xfs_buf_hold(bp);
    return 0;
}
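
/*
 * Editor's example of the sizing math above: a single-map 4096 byte
 * buffer divides into 4096 / XFS_BLF_CHUNK = 32 chunks, which fit in one
 * 32-bit bitmap word:
 *
 *	chunks   = DIV_ROUND_UP(4096, 128);	(32)
 *	map_size = DIV_ROUND_UP(32, 32);	(1 word)
 *
 * A 64k map needs 512 chunks and hence 16 words; any map that would need
 * more than XFS_BLF_DATAMAP_SIZE words is rejected by the check above as
 * corruption rather than silently truncating the bitmap.
 */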

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
    uint            first,
    uint            last,
    uint            *map)
{
    uint        first_bit;
    uint        last_bit;
    uint        bits_to_set;
    uint        bits_set;
    uint        word_num;
    uint        *wordp;
    uint        bit;
    uint        end_bit;
    uint        mask;

    ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
    ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

    /*
     * Convert byte offsets to bit numbers.
     */
    first_bit = first >> XFS_BLF_SHIFT;
    last_bit = last >> XFS_BLF_SHIFT;

    /*
     * Calculate the total number of bits to be set.
     */
    bits_to_set = last_bit - first_bit + 1;

    /*
     * Get a pointer to the first word in the bitmap
     * to set a bit in.
     */
    word_num = first_bit >> BIT_TO_WORD_SHIFT;
    wordp = &map[word_num];

    /*
     * Calculate the starting bit in the first word.
     */
    bit = first_bit & (uint)(NBWORD - 1);

    /*
     * First set any bits in the first word of our range.
     * If it starts at bit 0 of the word, it will be
     * set below rather than here.  That is what the variable
     * bit tells us. The variable bits_set tracks the number
     * of bits that have been set so far.  End_bit is the number
     * of the last bit to be set in this word plus one.
     */
    if (bit) {
        end_bit = min(bit + bits_to_set, (uint)NBWORD);
        mask = ((1U << (end_bit - bit)) - 1) << bit;
        *wordp |= mask;
        wordp++;
        bits_set = end_bit - bit;
    } else {
        bits_set = 0;
    }

    /*
     * Now set bits a whole word at a time that are between
     * first_bit and last_bit.
     */
    while ((bits_to_set - bits_set) >= NBWORD) {
        *wordp = 0xffffffff;
        bits_set += NBWORD;
        wordp++;
    }

    /*
     * Finally, set any bits left to be set in one last partial word.
     */
    end_bit = bits_to_set - bits_set;
    if (end_bit) {
        mask = (1U << end_bit) - 1;
        *wordp |= mask;
    }
}
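
/*
 * Worked example for the routine above (editor's addition): logging bytes
 * 100 through 299 of a segment converts to chunk bits 100 >> 7 = 0
 * through 299 >> 7 = 2, so bits_to_set = 3. Because first_bit falls on
 * bit 0 of word 0, the partial-first-word branch is skipped and the final
 * partial-word step sets everything at once:
 *
 *	bits_to_set = 2 - 0 + 1;	(3)
 *	mask = (1U << 3) - 1;		(0x7)
 *	map[0] |= mask;			(chunks 0-2 now dirty)
 *
 * Chunk granularity means logging even a single byte always dirties a
 * whole 128 byte chunk.
 */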

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
    struct xfs_buf_log_item *bip,
    uint            first,
    uint            last)
{
    int         i;
    uint            start;
    uint            end;
    struct xfs_buf      *bp = bip->bli_buf;

    /*
     * walk each buffer segment and mark them dirty appropriately.
     */
    start = 0;
    for (i = 0; i < bip->bli_format_count; i++) {
        if (start > last)
            break;
        end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

        /* skip to the map that includes the first byte to log */
        if (first > end) {
            start += BBTOB(bp->b_maps[i].bm_len);
            continue;
        }

        /*
         * Trim the range to this segment and mark it in the bitmap.
         * Note that we must convert buffer offsets to segment relative
         * offsets (e.g., the first byte of each segment is byte 0 of
         * that segment).
         */
        if (first < start)
            first = start;
        if (end > last)
            end = last;
        xfs_buf_item_log_segment(first - start, end - start,
                     &bip->bli_formats[i].blf_data_map[0]);

        start += BBTOB(bp->b_maps[i].bm_len);
    }
}
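
/*
 * Editor's example of the segment walk above: on a two-map buffer with
 * 4096 bytes per map, logging buffer bytes 4000-4200 touches both
 * segments. Each iteration trims the range to its segment and rebases it
 * to segment-relative offsets:
 *
 *	map 0: start = 0,    end = 4095  ->  log bytes 4000-4095 as is
 *	map 1: start = 4096, end = 8191  ->  log bytes 4096-4200 as 0-104
 *
 * so each blf_data_map only ever describes offsets within its own
 * segment.
 */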

/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
    struct xfs_buf_log_item *bip)
{
    int         i;

    for (i = 0; i < bip->bli_format_count; i++) {
        if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
                 bip->bli_formats[i].blf_map_size))
            return true;
    }

    return false;
}

STATIC void
xfs_buf_item_free(
    struct xfs_buf_log_item *bip)
{
    xfs_buf_item_free_format(bip);
    kmem_free(bip->bli_item.li_lv_shadow);
    kmem_cache_free(xfs_buf_item_cache, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
    struct xfs_buf  *bp)
{
    struct xfs_buf_log_item *bip = bp->b_log_item;

    trace_xfs_buf_item_relse(bp, _RET_IP_);
    ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

    bp->b_log_item = NULL;
    xfs_buf_rele(bp);
    xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
    struct xfs_buf      *bp)
{
    /*
     * If we are forcibly shutting down, this may well be off the AIL
     * already. That's because we simulate the log-committed callbacks to
     * unpin these buffers. Or we may never have put this item on the AIL
     * because the transaction was forcibly aborted.
     * xfs_trans_ail_delete() takes care of these.
     *
     * Either way, the AIL is useless if we're forcing a shutdown.
     *
     * Note that log recovery writes might have buffer items that are not
     * on the AIL even when the file system is not shut down.
     */
    xfs_trans_ail_delete(&bp->b_log_item->bli_item,
                 (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
                 SHUTDOWN_CORRUPT_INCORE);
    xfs_buf_item_relse(bp);
}