/*
 * NOTE(extraction): the lines above/below were captured through a web code
 * browser (LXR); navigation text removed. Content is fs/xfs transaction
 * buffer handling from the Linux kernel.
 */

0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
0004  * All Rights Reserved.
0005  */
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_log_format.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_mount.h"
0013 #include "xfs_trans.h"
0014 #include "xfs_buf_item.h"
0015 #include "xfs_trans_priv.h"
0016 #include "xfs_trace.h"
0017 
0018 /*
0019  * Check to see if a buffer matching the given parameters is already
0020  * a part of the given transaction.
0021  */
0022 STATIC struct xfs_buf *
0023 xfs_trans_buf_item_match(
0024     struct xfs_trans    *tp,
0025     struct xfs_buftarg  *target,
0026     struct xfs_buf_map  *map,
0027     int         nmaps)
0028 {
0029     struct xfs_log_item *lip;
0030     struct xfs_buf_log_item *blip;
0031     int         len = 0;
0032     int         i;
0033 
0034     for (i = 0; i < nmaps; i++)
0035         len += map[i].bm_len;
0036 
0037     list_for_each_entry(lip, &tp->t_items, li_trans) {
0038         blip = (struct xfs_buf_log_item *)lip;
0039         if (blip->bli_item.li_type == XFS_LI_BUF &&
0040             blip->bli_buf->b_target == target &&
0041             xfs_buf_daddr(blip->bli_buf) == map[0].bm_bn &&
0042             blip->bli_buf->b_length == len) {
0043             ASSERT(blip->bli_buf->b_map_count == nmaps);
0044             return blip->bli_buf;
0045         }
0046     }
0047 
0048     return NULL;
0049 }
0050 
0051 /*
0052  * Add the locked buffer to the transaction.
0053  *
0054  * The buffer must be locked, and it cannot be associated with any
0055  * transaction.
0056  *
0057  * If the buffer does not yet have a buf log item associated with it,
0058  * then allocate one for it.  Then add the buf item to the transaction.
0059  */
0060 STATIC void
0061 _xfs_trans_bjoin(
0062     struct xfs_trans    *tp,
0063     struct xfs_buf      *bp,
0064     int         reset_recur)
0065 {
0066     struct xfs_buf_log_item *bip;
0067 
0068     ASSERT(bp->b_transp == NULL);
0069 
0070     /*
0071      * The xfs_buf_log_item pointer is stored in b_log_item.  If
0072      * it doesn't have one yet, then allocate one and initialize it.
0073      * The checks to see if one is there are in xfs_buf_item_init().
0074      */
0075     xfs_buf_item_init(bp, tp->t_mountp);
0076     bip = bp->b_log_item;
0077     ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
0078     ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
0079     ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
0080     if (reset_recur)
0081         bip->bli_recur = 0;
0082 
0083     /*
0084      * Take a reference for this transaction on the buf item.
0085      */
0086     atomic_inc(&bip->bli_refcount);
0087 
0088     /*
0089      * Attach the item to the transaction so we can find it in
0090      * xfs_trans_get_buf() and friends.
0091      */
0092     xfs_trans_add_item(tp, &bip->bli_item);
0093     bp->b_transp = tp;
0094 
0095 }
0096 
0097 void
0098 xfs_trans_bjoin(
0099     struct xfs_trans    *tp,
0100     struct xfs_buf      *bp)
0101 {
0102     _xfs_trans_bjoin(tp, bp, 0);
0103     trace_xfs_trans_bjoin(bp->b_log_item);
0104 }
0105 
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 *
 * Returns 0 and sets *bpp on success; returns a negative errno and
 * leaves *bpp NULL on failure.
 */
int
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags, bpp);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		/*
		 * After a shutdown nothing will be written back, so mark
		 * the buffer stale and pretend the contents are valid.
		 */
		if (xfs_is_shutdown(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		/* Recursive grab; undone by a later xfs_trans_brelse(). */
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
	if (error)
		return error;

	ASSERT(!bp->b_error);

	/* Newly acquired buffer: attach it to the transaction. */
	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	*bpp = bp;
	return 0;
}
0167 
0168 /*
0169  * Get and lock the superblock buffer for the given transaction.
0170  */
0171 struct xfs_buf *
0172 xfs_trans_getsb(
0173     struct xfs_trans    *tp)
0174 {
0175     struct xfs_buf      *bp = tp->t_mountp->m_sb_bp;
0176 
0177     /*
0178      * Just increment the lock recursion count if the buffer is already
0179      * attached to this transaction.
0180      */
0181     if (bp->b_transp == tp) {
0182         struct xfs_buf_log_item *bip = bp->b_log_item;
0183 
0184         ASSERT(bip != NULL);
0185         ASSERT(atomic_read(&bip->bli_refcount) > 0);
0186         bip->bli_recur++;
0187 
0188         trace_xfs_trans_getsb_recur(bip);
0189     } else {
0190         xfs_buf_lock(bp);
0191         xfs_buf_hold(bp);
0192         _xfs_trans_bjoin(tp, bp, 1);
0193 
0194         trace_xfs_trans_getsb(bp->b_log_item);
0195     }
0196 
0197     return bp;
0198 }
0199 
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 *
 * Returns 0 and sets *bpp on success; returns a negative errno and
 * leaves *bpp NULL on failure.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (xfs_is_shutdown(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		/*
		 * Check if the caller is trying to read a buffer that is
		 * already attached to the transaction yet has no buffer ops
		 * assigned.  Ops are usually attached when the buffer is
		 * attached to the transaction, or by the read caller if
		 * special circumstances.  That didn't happen, which is not
		 * how this is supposed to go.
		 *
		 * If the buffer passes verification we'll let this go, but if
		 * not we have to shut down.  Let the transaction cleanup code
		 * release this buffer when it kills the transaction.
		 */
		ASSERT(bp->b_ops != NULL);
		error = xfs_buf_reverify(bp, ops);
		if (error) {
			xfs_buf_ioerror_alert(bp, __return_address);

			/* A verification failure on dirty state is fatal. */
			if (tp->t_flags & XFS_TRANS_DIRTY)
				xfs_force_shutdown(tp->t_mountp,
						SHUTDOWN_META_IO_ERROR);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}

		bip = bp->b_log_item;
		/* Recursive grab; undone by a later xfs_trans_brelse(). */
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		ASSERT(bp->b_ops != NULL || ops == NULL);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
			__return_address);
	switch (error) {
	case 0:
		break;
	default:
		/*
		 * A real read error against a dirty transaction means the
		 * filesystem is toast; shut down before propagating it.
		 */
		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		fallthrough;
	case -ENOMEM:
	case -EAGAIN:
		/* Transient errors: no shutdown, let the caller retry. */
		return error;
	}

	if (xfs_is_shutdown(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	ASSERT(bp->b_ops != NULL || ops == NULL);
	*bpp = bp;
	return 0;

}
0318 
0319 /* Has this buffer been dirtied by anyone? */
0320 bool
0321 xfs_trans_buf_is_dirty(
0322     struct xfs_buf      *bp)
0323 {
0324     struct xfs_buf_log_item *bip = bp->b_log_item;
0325 
0326     if (!bip)
0327         return false;
0328     ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
0329     return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
0330 }
0331 
/*
 * Release a buffer previously joined to the transaction. If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0. If the buffer is not modified
 * within the transaction, decrement the recursion count and release the buffer
 * if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* NB: with tp == NULL this asserts the buffer is unowned. */
	ASSERT(bp->b_transp == tp);

	if (!tp) {
		xfs_buf_relse(bp);
		return;
	}

	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set. We wouldn't want the next user of the buffer to get confused.
	 */
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	/* Finally disown the buffer and drop the buffer lock/reference. */
	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
0394 
0395 /*
0396  * Mark the buffer as not needing to be unlocked when the buf item's
0397  * iop_committing() routine is called.  The buffer must already be locked
0398  * and associated with the given transaction.
0399  */
0400 /* ARGSUSED */
0401 void
0402 xfs_trans_bhold(
0403     xfs_trans_t     *tp,
0404     struct xfs_buf      *bp)
0405 {
0406     struct xfs_buf_log_item *bip = bp->b_log_item;
0407 
0408     ASSERT(bp->b_transp == tp);
0409     ASSERT(bip != NULL);
0410     ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
0411     ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
0412     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0413 
0414     bip->bli_flags |= XFS_BLI_HOLD;
0415     trace_xfs_trans_bhold(bip);
0416 }
0417 
0418 /*
0419  * Cancel the previous buffer hold request made on this buffer
0420  * for this transaction.
0421  */
0422 void
0423 xfs_trans_bhold_release(
0424     xfs_trans_t     *tp,
0425     struct xfs_buf      *bp)
0426 {
0427     struct xfs_buf_log_item *bip = bp->b_log_item;
0428 
0429     ASSERT(bp->b_transp == tp);
0430     ASSERT(bip != NULL);
0431     ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
0432     ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
0433     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0434     ASSERT(bip->bli_flags & XFS_BLI_HOLD);
0435 
0436     bip->bli_flags &= ~XFS_BLI_HOLD;
0437     trace_xfs_trans_bhold_release(bip);
0438 }
0439 
0440 /*
0441  * Mark a buffer dirty in the transaction.
0442  */
0443 void
0444 xfs_trans_dirty_buf(
0445     struct xfs_trans    *tp,
0446     struct xfs_buf      *bp)
0447 {
0448     struct xfs_buf_log_item *bip = bp->b_log_item;
0449 
0450     ASSERT(bp->b_transp == tp);
0451     ASSERT(bip != NULL);
0452 
0453     /*
0454      * Mark the buffer as needing to be written out eventually,
0455      * and set its iodone function to remove the buffer's buf log
0456      * item from the AIL and free it when the buffer is flushed
0457      * to disk.
0458      */
0459     bp->b_flags |= XBF_DONE;
0460 
0461     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0462 
0463     /*
0464      * If we invalidated the buffer within this transaction, then
0465      * cancel the invalidation now that we're dirtying the buffer
0466      * again.  There are no races with the code in xfs_buf_item_unpin(),
0467      * because we have a reference to the buffer this entire time.
0468      */
0469     if (bip->bli_flags & XFS_BLI_STALE) {
0470         bip->bli_flags &= ~XFS_BLI_STALE;
0471         ASSERT(bp->b_flags & XBF_STALE);
0472         bp->b_flags &= ~XBF_STALE;
0473         bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
0474     }
0475     bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
0476 
0477     tp->t_flags |= XFS_TRANS_DIRTY;
0478     set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
0479 }
0480 
0481 /*
0482  * This is called to mark bytes first through last inclusive of the given
0483  * buffer as needing to be logged when the transaction is committed.
0484  * The buffer must already be associated with the given transaction.
0485  *
0486  * First and last are numbers relative to the beginning of this buffer,
0487  * so the first byte in the buffer is numbered 0 regardless of the
0488  * value of b_blkno.
0489  */
0490 void
0491 xfs_trans_log_buf(
0492     struct xfs_trans    *tp,
0493     struct xfs_buf      *bp,
0494     uint            first,
0495     uint            last)
0496 {
0497     struct xfs_buf_log_item *bip = bp->b_log_item;
0498 
0499     ASSERT(first <= last && last < BBTOB(bp->b_length));
0500     ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
0501 
0502     xfs_trans_dirty_buf(tp, bp);
0503 
0504     trace_xfs_trans_log_buf(bip);
0505     xfs_buf_item_log(bip, first, last);
0506 }
0507 
0508 
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.  The asserts below verify that an earlier
		 * invalidation left all the stale-state invariants intact.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	/* Flip the item into the cancelled state for log recovery. */
	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	/* Forget which ranges were logged; nothing will be replayed. */
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	/* Keep the buffer attached until the cancellation commits. */
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}
0581 
0582 /*
0583  * This call is used to indicate that the buffer contains on-disk inodes which
0584  * must be handled specially during recovery.  They require special handling
0585  * because only the di_next_unlinked from the inodes in the buffer should be
0586  * recovered.  The rest of the data in the buffer is logged via the inodes
0587  * themselves.
0588  *
0589  * All we do is set the XFS_BLI_INODE_BUF flag in the items flags so it can be
0590  * transferred to the buffer's log format structure so that we'll know what to
0591  * do at recovery time.
0592  */
0593 void
0594 xfs_trans_inode_buf(
0595     xfs_trans_t     *tp,
0596     struct xfs_buf      *bp)
0597 {
0598     struct xfs_buf_log_item *bip = bp->b_log_item;
0599 
0600     ASSERT(bp->b_transp == tp);
0601     ASSERT(bip != NULL);
0602     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0603 
0604     bip->bli_flags |= XFS_BLI_INODE_BUF;
0605     bp->b_flags |= _XBF_INODES;
0606     xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
0607 }
0608 
0609 /*
0610  * This call is used to indicate that the buffer is going to
0611  * be staled and was an inode buffer. This means it gets
0612  * special processing during unpin - where any inodes
0613  * associated with the buffer should be removed from ail.
0614  * There is also special processing during recovery,
0615  * any replay of the inodes in the buffer needs to be
0616  * prevented as the buffer may have been reused.
0617  */
0618 void
0619 xfs_trans_stale_inode_buf(
0620     xfs_trans_t     *tp,
0621     struct xfs_buf      *bp)
0622 {
0623     struct xfs_buf_log_item *bip = bp->b_log_item;
0624 
0625     ASSERT(bp->b_transp == tp);
0626     ASSERT(bip != NULL);
0627     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0628 
0629     bip->bli_flags |= XFS_BLI_STALE_INODE;
0630     bp->b_flags |= _XBF_INODES;
0631     xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
0632 }
0633 
0634 /*
0635  * Mark the buffer as being one which contains newly allocated
0636  * inodes.  We need to make sure that even if this buffer is
0637  * relogged as an 'inode buf' we still recover all of the inode
0638  * images in the face of a crash.  This works in coordination with
0639  * xfs_buf_item_committed() to ensure that the buffer remains in the
0640  * AIL at its original location even after it has been relogged.
0641  */
0642 /* ARGSUSED */
0643 void
0644 xfs_trans_inode_alloc_buf(
0645     xfs_trans_t     *tp,
0646     struct xfs_buf      *bp)
0647 {
0648     struct xfs_buf_log_item *bip = bp->b_log_item;
0649 
0650     ASSERT(bp->b_transp == tp);
0651     ASSERT(bip != NULL);
0652     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0653 
0654     bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
0655     bp->b_flags |= _XBF_INODES;
0656     xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
0657 }
0658 
0659 /*
0660  * Mark the buffer as ordered for this transaction. This means that the contents
0661  * of the buffer are not recorded in the transaction but it is tracked in the
0662  * AIL as though it was. This allows us to record logical changes in
0663  * transactions rather than the physical changes we make to the buffer without
0664  * changing writeback ordering constraints of metadata buffers.
0665  */
0666 bool
0667 xfs_trans_ordered_buf(
0668     struct xfs_trans    *tp,
0669     struct xfs_buf      *bp)
0670 {
0671     struct xfs_buf_log_item *bip = bp->b_log_item;
0672 
0673     ASSERT(bp->b_transp == tp);
0674     ASSERT(bip != NULL);
0675     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0676 
0677     if (xfs_buf_item_dirty_format(bip))
0678         return false;
0679 
0680     bip->bli_flags |= XFS_BLI_ORDERED;
0681     trace_xfs_buf_item_ordered(bip);
0682 
0683     /*
0684      * We don't log a dirty range of an ordered buffer but it still needs
0685      * to be marked dirty and that it has been logged.
0686      */
0687     xfs_trans_dirty_buf(tp, bp);
0688     return true;
0689 }
0690 
0691 /*
0692  * Set the type of the buffer for log recovery so that it can correctly identify
0693  * and hence attach the correct buffer ops to the buffer after replay.
0694  */
0695 void
0696 xfs_trans_buf_set_type(
0697     struct xfs_trans    *tp,
0698     struct xfs_buf      *bp,
0699     enum xfs_blft       type)
0700 {
0701     struct xfs_buf_log_item *bip = bp->b_log_item;
0702 
0703     if (!tp)
0704         return;
0705 
0706     ASSERT(bp->b_transp == tp);
0707     ASSERT(bip != NULL);
0708     ASSERT(atomic_read(&bip->bli_refcount) > 0);
0709 
0710     xfs_blft_to_flags(&bip->__bli_format, type);
0711 }
0712 
0713 void
0714 xfs_trans_buf_copy_type(
0715     struct xfs_buf      *dst_bp,
0716     struct xfs_buf      *src_bp)
0717 {
0718     struct xfs_buf_log_item *sbip = src_bp->b_log_item;
0719     struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
0720     enum xfs_blft       type;
0721 
0722     type = xfs_blft_from_flags(&sbip->__bli_format);
0723     xfs_blft_to_flags(&dbip->__bli_format, type);
0724 }
0725 
0726 /*
0727  * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
0728  * dquots. However, unlike in inode buffer recovery, dquot buffers get
0729  * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
0730  * The only thing that makes dquot buffers different from regular
0731  * buffers is that we must not replay dquot bufs when recovering
0732  * if a _corresponding_ quotaoff has happened. We also have to distinguish
0733  * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
0734  * can be turned off independently.
0735  */
0736 /* ARGSUSED */
0737 void
0738 xfs_trans_dquot_buf(
0739     xfs_trans_t     *tp,
0740     struct xfs_buf      *bp,
0741     uint            type)
0742 {
0743     struct xfs_buf_log_item *bip = bp->b_log_item;
0744 
0745     ASSERT(type == XFS_BLF_UDQUOT_BUF ||
0746            type == XFS_BLF_PDQUOT_BUF ||
0747            type == XFS_BLF_GDQUOT_BUF);
0748 
0749     bip->__bli_format.blf_flags |= type;
0750 
0751     switch (type) {
0752     case XFS_BLF_UDQUOT_BUF:
0753         type = XFS_BLFT_UDQUOT_BUF;
0754         break;
0755     case XFS_BLF_PDQUOT_BUF:
0756         type = XFS_BLFT_PDQUOT_BUF;
0757         break;
0758     case XFS_BLF_GDQUOT_BUF:
0759         type = XFS_BLFT_GDQUOT_BUF;
0760         break;
0761     default:
0762         type = XFS_BLFT_UNKNOWN_BUF;
0763         break;
0764     }
0765 
0766     bp->b_flags |= _XBF_DQUOTS;
0767     xfs_trans_buf_set_type(tp, bp, type);
0768 }