// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"

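/*
 * Midpoint of two block numbers, used by the binary searches below.
 */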
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
    struct xlog *,
    xfs_daddr_t *);
STATIC int
xlog_clear_stale_blocks(
    struct xlog *,
    xfs_lsn_t);
STATIC int
xlog_do_recovery_pass(
        struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
    struct xlog *log,
    xfs_daddr_t blk_no,
    int     bbcount)
{
    if (blk_no < 0 || blk_no >= log->l_logBBsize)
        return false;
    if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
        return false;
    return true;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
    struct xlog *log,
    int     nbblks)
{
    /*
     * Pass log block 0 since we don't have an addr yet, buffer will be
     * verified on read.
     */
    if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
        xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
            nbblks);
        return NULL;
    }

    /*
     * We do log I/O in units of log sectors (a power-of-2 multiple of the
     * basic block size), so we round up the requested size to accommodate
     * the basic blocks required for complete log sectors.
     *
     * In addition, the buffer may be used for a non-sector-aligned block
     * offset, in which case an I/O of the requested size could extend
     * beyond the end of the buffer.  If the requested size is only 1 basic
     * block it will never straddle a sector boundary, so this won't be an
     * issue.  Nor will this be a problem if the log I/O is done in basic
     * blocks (sector size 1).  But otherwise we extend the buffer by one
     * extra log sector to ensure there's space to accommodate this
     * possibility.
     */
    if (nbblks > 1 && log->l_sectBBsize > 1)
        nbblks += log->l_sectBBsize;
    nbblks = round_up(nbblks, log->l_sectBBsize);
    return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
    struct xlog *log,
    xfs_daddr_t blk_no)
{
    return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}

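/*
 * Perform a synchronous read or write of nbblks basic blocks against the
 * log device.  The request is widened to log sector alignment (callers
 * locate their data within the buffer via xlog_align()), and failures are
 * reported unless the log has already shut down.
 */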
static int
xlog_do_io(
    struct xlog     *log,
    xfs_daddr_t     blk_no,
    unsigned int        nbblks,
    char            *data,
    enum req_op     op)
{
    int         error;

    if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
        xfs_warn(log->l_mp,
             "Invalid log block/length (0x%llx, 0x%x) for buffer",
             blk_no, nbblks);
        return -EFSCORRUPTED;
    }

    blk_no = round_down(blk_no, log->l_sectBBsize);
    nbblks = round_up(nbblks, log->l_sectBBsize);
    ASSERT(nbblks > 0);

    error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
            BBTOB(nbblks), data, op);
    if (error && !xlog_is_shutdown(log)) {
        xfs_alert(log->l_mp,
              "log recovery %s I/O error at daddr 0x%llx len %d error %d",
              op == REQ_OP_WRITE ? "write" : "read",
              blk_no, nbblks, error);
    }
    return error;
}

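/*
 * Read nbblks basic blocks without returning an offset into the buffer;
 * the caller handles any sub-sector alignment itself.
 */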
STATIC int
xlog_bread_noalign(
    struct xlog *log,
    xfs_daddr_t blk_no,
    int     nbblks,
    char        *data)
{
    return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

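/*
 * Read nbblks basic blocks and return a pointer to the requested block's
 * data within the sector-aligned buffer via *offset.
 */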
STATIC int
xlog_bread(
    struct xlog *log,
    xfs_daddr_t blk_no,
    int     nbblks,
    char        *data,
    char        **offset)
{
    int     error;

    error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
    if (!error)
        *offset = data + xlog_align(log, blk_no);
    return error;
}

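/* Write nbblks basic blocks from the given sector-aligned buffer. */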
STATIC int
xlog_bwrite(
    struct xlog *log,
    xfs_daddr_t blk_no,
    int     nbblks,
    char        *data)
{
    return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
    xfs_mount_t     *mp,
    xlog_rec_header_t   *head)
{
    xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
        __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
    xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
        &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
    xfs_mount_t     *mp,
    xlog_rec_header_t   *head)
{
    ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

    /*
     * IRIX doesn't write the h_fmt field and leaves it zeroed
     * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
     * a dirty log created in IRIX.
     */
    if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
        xfs_warn(mp,
    "dirty log written in incompatible format - can't recover");
        xlog_header_check_dump(mp, head);
        return -EFSCORRUPTED;
    }
    if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                       &head->h_fs_uuid))) {
        xfs_warn(mp,
    "dirty log entry has mismatched uuid - can't recover");
        xlog_header_check_dump(mp, head);
        return -EFSCORRUPTED;
    }
    return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
    xfs_mount_t     *mp,
    xlog_rec_header_t   *head)
{
    ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

    if (uuid_is_null(&head->h_fs_uuid)) {
        /*
         * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
         * h_fs_uuid is null, we assume this log was last mounted
         * by IRIX and continue.
         */
        xfs_warn(mp, "null uuid in log - IRIX style log");
    } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
                          &head->h_fs_uuid))) {
        xfs_warn(mp, "log has mismatched uuid - can't recover");
        xlog_header_check_dump(mp, head);
        return -EFSCORRUPTED;
    }
    return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be exact because the cycle numbers on
 * disk are not guaranteed to form a single clean transition.
 */
STATIC int
xlog_find_cycle_start(
    struct xlog *log,
    char        *buffer,
    xfs_daddr_t first_blk,
    xfs_daddr_t *last_blk,
    uint        cycle)
{
    char        *offset;
    xfs_daddr_t mid_blk;
    xfs_daddr_t end_blk;
    uint        mid_cycle;
    int     error;

    end_blk = *last_blk;
    mid_blk = BLK_AVG(first_blk, end_blk);
    while (mid_blk != first_blk && mid_blk != end_blk) {
        error = xlog_bread(log, mid_blk, 1, buffer, &offset);
        if (error)
            return error;
        mid_cycle = xlog_get_cycle(offset);
        if (mid_cycle == cycle)
            end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
        else
            first_blk = mid_blk; /* first_half_cycle == mid_cycle */
        mid_blk = BLK_AVG(first_blk, end_blk);
    }
    ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
           (mid_blk == end_blk && mid_blk-1 == first_blk));

    *last_blk = end_blk;

    return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
    struct xlog *log,
    xfs_daddr_t start_blk,
    int     nbblks,
    uint        stop_on_cycle_no,
    xfs_daddr_t *new_blk)
{
    xfs_daddr_t i, j;
    uint        cycle;
    char        *buffer;
    xfs_daddr_t bufblks;
    char        *buf = NULL;
    int     error = 0;

    /*
     * Greedily allocate a buffer big enough to handle the full
     * range of basic blocks we'll be examining.  If that fails,
     * try a smaller size.  We need to be able to read at least
     * a log sector, or we're out of luck.
     */
    bufblks = 1 << ffs(nbblks);
    while (bufblks > log->l_logBBsize)
        bufblks >>= 1;
    while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
        bufblks >>= 1;
        if (bufblks < log->l_sectBBsize)
            return -ENOMEM;
    }

    for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
        int bcount;

        bcount = min(bufblks, (start_blk + nbblks - i));

        error = xlog_bread(log, i, bcount, buffer, &buf);
        if (error)
            goto out;

        for (j = 0; j < bcount; j++) {
            cycle = xlog_get_cycle(buf);
            if (cycle == stop_on_cycle_no) {
                *new_blk = i+j;
                goto out;
            }

            buf += BBSIZE;
        }
    }

    *new_blk = -1;

out:
    kmem_free(buffer);
    return error;
}

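/*
 * Return the number of basic blocks occupied by a log record header.  V2
 * logs whose iclog size exceeds XLOG_HEADER_CYCLE_SIZE spread the saved
 * cycle data over multiple header blocks; everything else uses one block.
 */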
static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
    if (xfs_has_logv2(log->l_mp)) {
        int h_size = be32_to_cpu(rh->h_size);

        if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
            h_size > XLOG_HEADER_CYCLE_SIZE)
            return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
    }
    return 1;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
    struct xlog     *log,
    xfs_daddr_t     start_blk,
    xfs_daddr_t     *last_blk,
    int         extra_bblks)
{
    xfs_daddr_t     i;
    char            *buffer;
    char            *offset = NULL;
    xlog_rec_header_t   *head = NULL;
    int         error = 0;
    int         smallmem = 0;
    int         num_blks = *last_blk - start_blk;
    int         xhdrs;

    ASSERT(start_blk != 0 || *last_blk != start_blk);

    buffer = xlog_alloc_buffer(log, num_blks);
    if (!buffer) {
        buffer = xlog_alloc_buffer(log, 1);
        if (!buffer)
            return -ENOMEM;
        smallmem = 1;
    } else {
        error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
        if (error)
            goto out;
        offset += ((num_blks - 1) << BBSHIFT);
    }

    for (i = (*last_blk) - 1; i >= 0; i--) {
        if (i < start_blk) {
            /* valid log record not found */
            xfs_warn(log->l_mp,
        "Log inconsistent (didn't find previous header)");
            ASSERT(0);
            error = -EFSCORRUPTED;
            goto out;
        }

        if (smallmem) {
            error = xlog_bread(log, i, 1, buffer, &offset);
            if (error)
                goto out;
        }

        head = (xlog_rec_header_t *)offset;

        if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
            break;

        if (!smallmem)
            offset -= BBSIZE;
    }

    /*
     * We hit the beginning of the physical log and still no header.  Return
     * to caller.  If the caller can handle a return of 1, then this routine
     * will be called again for the end of the physical log.
     */
    if (i == -1) {
        error = 1;
        goto out;
    }

    /*
     * We have the final block of the good log (the first block of the
     * log record _before_ the head), so we check the uuid.
     */
    if ((error = xlog_header_check_mount(log->l_mp, head)))
        goto out;

    /*
     * We may have found a log record header before we expected one.
     * last_blk will be the 1st block # with a given cycle #.  We may end
     * up reading an entire log record.  In this case, we don't want to
     * reset last_blk.  Only when last_blk points in the middle of a log
     * record do we update last_blk.
     */
    xhdrs = xlog_logrec_hblks(log, head);

    if (*last_blk - i + extra_bblks !=
        BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
        *last_blk = i;

out:
    kmem_free(buffer);
    return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete log record (LR) writes at the end
 * are eliminated when calculating the head.  We aren't guaranteed that
 * previous LRs have complete transactions.  We only know that a cycle
 * number of current cycle number -1 won't be present in the log if we
 * start writing from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
    struct xlog *log,
    xfs_daddr_t *return_head_blk)
{
    char        *buffer;
    char        *offset;
    xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
    int     num_scan_bblks;
    uint        first_half_cycle, last_half_cycle;
    uint        stop_on_cycle;
    int     error, log_bbnum = log->l_logBBsize;

    /* Is the end of the log device zeroed? */
    error = xlog_find_zeroed(log, &first_blk);
    if (error < 0) {
        xfs_warn(log->l_mp, "empty log check failed");
        return error;
    }
    if (error == 1) {
        *return_head_blk = first_blk;

        /* Is the whole lot zeroed? */
        if (!first_blk) {
            /* Linux XFS shouldn't generate totally zeroed logs -
             * mkfs etc write a dummy unmount record to a fresh
             * log so we can store the uuid in there
             */
            xfs_warn(log->l_mp, "totally zeroed log");
        }

        return 0;
    }

    first_blk = 0;          /* get cycle # of 1st block */
    buffer = xlog_alloc_buffer(log, 1);
    if (!buffer)
        return -ENOMEM;

    error = xlog_bread(log, 0, 1, buffer, &offset);
    if (error)
        goto out_free_buffer;

    first_half_cycle = xlog_get_cycle(offset);

    last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
    error = xlog_bread(log, last_blk, 1, buffer, &offset);
    if (error)
        goto out_free_buffer;

    last_half_cycle = xlog_get_cycle(offset);
    ASSERT(last_half_cycle != 0);

    /*
     * If the 1st half cycle number is equal to the last half cycle number,
     * then the entire log is stamped with the same cycle number.  In this
     * case, head_blk can't be set to zero (which makes sense).  The below
     * math doesn't work out properly with head_blk equal to zero.  Instead,
     * we set it to log_bbnum which is an invalid block number, but this
     * value makes the math correct.  If head_blk doesn't change through
     * all the tests below, *head_blk is set to zero at the very end rather
     * than log_bbnum.  In a sense, log_bbnum and zero are the same block
     * in a circular file.
     */
    if (first_half_cycle == last_half_cycle) {
        /*
         * In this case we believe that the entire log should have
         * cycle number last_half_cycle.  We need to scan backwards
         * from the end verifying that there are no holes still
         * containing last_half_cycle - 1.  If we find such a hole,
         * then the start of that hole will be the new head.  The
         * simple case looks like
         *        x | x ... | x - 1 | x
         * Another case that fits this picture would be
         *        x | x + 1 | x ... | x
         * In this case the head really is somewhere at the end of the
         * log, as one of the latest writes at the beginning was
         * incomplete.
         * One more case is
         *        x | x + 1 | x ... | x - 1 | x
         * This is really the combination of the above two cases, and
         * the head has to end up at the start of the x-1 hole at the
         * end of the log.
         *
         * In the 256k log case, we will read from the beginning to the
         * end of the log and search for cycle numbers equal to x-1.
         * We don't worry about the x+1 blocks that we encounter,
         * because we know that they cannot be the head since the log
         * started with x.
         */
        head_blk = log_bbnum;
        stop_on_cycle = last_half_cycle - 1;
    } else {
        /*
         * In this case we want to find the first block with cycle
         * number matching last_half_cycle.  We expect the log to be
         * some variation on
         *        x + 1 ... | x ... | x
         * The first block with cycle number x (last_half_cycle) will
         * be where the new head belongs.  First we do a binary search
         * for the first occurrence of last_half_cycle.  The binary
         * search may not be totally accurate, so then we scan back
         * from there looking for occurrences of last_half_cycle before
         * us.  If that backwards scan wraps around the beginning of
         * the log, then we look for occurrences of last_half_cycle - 1
         * at the end of the log.  The cases we're looking for look
         * like
         *                               v binary search stopped here
         *        x + 1 ... | x | x + 1 | x ... | x
         *                   ^ but we want to locate this spot
         * or
         *        <---------> less than scan distance
         *        x + 1 ... | x ... | x - 1 | x
         *                           ^ we want to locate this spot
         */
        stop_on_cycle = last_half_cycle;
        error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
                last_half_cycle);
        if (error)
            goto out_free_buffer;
    }

    /*
     * Now validate the answer.  Scan back some number of maximum possible
     * blocks and make sure each one has the expected cycle number.  The
     * maximum is determined by the total possible amount of buffering
     * in the in-core log.  The following number can be made tighter if
     * we actually look at the block size of the filesystem.
     */
    num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
    if (head_blk >= num_scan_bblks) {
        /*
         * We are guaranteed that the entire check can be performed
         * in one buffer.
         */
        start_blk = head_blk - num_scan_bblks;
        if ((error = xlog_find_verify_cycle(log,
                        start_blk, num_scan_bblks,
                        stop_on_cycle, &new_blk)))
            goto out_free_buffer;
        if (new_blk != -1)
            head_blk = new_blk;
    } else {        /* need to read 2 parts of log */
        /*
         * We are going to scan backwards in the log in two parts.
         * First we scan the physical end of the log.  In this part
         * of the log, we are looking for blocks with cycle number
         * last_half_cycle - 1.
         * If we find one, then we know that the log starts there, as
         * we've found a hole that didn't get written in going around
         * the end of the physical log.  The simple case for this is
         *        x + 1 ... | x ... | x - 1 | x
         *        <---------> less than scan distance
         * If all of the blocks at the end of the log have cycle number
         * last_half_cycle, then we check the blocks at the start of
         * the log looking for occurrences of last_half_cycle.  If we
         * find one, then our current estimate for the location of the
         * first occurrence of last_half_cycle is wrong and we move
         * back to the hole we've found.  This case looks like
         *        x + 1 ... | x | x + 1 | x ...
         *                               ^ binary search stopped here
         * Another case we need to handle that only occurs in 256k
         * logs is
         *        x + 1 ... | x ... | x+1 | x ...
         *                   ^ binary search stops here
         * In a 256k log, the scan at the end of the log will see the
         * x + 1 blocks.  We need to skip past those since that is
         * certainly not the head of the log.  By searching for
         * last_half_cycle-1 we accomplish that.
         */
        ASSERT(head_blk <= INT_MAX &&
            (xfs_daddr_t) num_scan_bblks >= head_blk);
        start_blk = log_bbnum - (num_scan_bblks - head_blk);
        if ((error = xlog_find_verify_cycle(log, start_blk,
                    num_scan_bblks - (int)head_blk,
                    (stop_on_cycle - 1), &new_blk)))
            goto out_free_buffer;
        if (new_blk != -1) {
            head_blk = new_blk;
            goto validate_head;
        }

        /*
         * Scan beginning of log now.  The last part of the physical
         * log is good.  This scan needs to verify that it doesn't find
         * the last_half_cycle.
         */
        start_blk = 0;
        ASSERT(head_blk <= INT_MAX);
        if ((error = xlog_find_verify_cycle(log,
                    start_blk, (int)head_blk,
                    stop_on_cycle, &new_blk)))
            goto out_free_buffer;
        if (new_blk != -1)
            head_blk = new_blk;
    }

validate_head:
    /*
     * Now we need to make sure head_blk is not pointing to a block in
     * the middle of a log record.
     */
    num_scan_bblks = XLOG_REC_SHIFT(log);
    if (head_blk >= num_scan_bblks) {
        start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

        /* start ptr at last block ptr before head_blk */
        error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
        if (error == 1)
            error = -EIO;
        if (error)
            goto out_free_buffer;
    } else {
        start_blk = 0;
        ASSERT(head_blk <= INT_MAX);
        error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
        if (error < 0)
            goto out_free_buffer;
        if (error == 1) {
            /* We hit the beginning of the log during our search */
            start_blk = log_bbnum - (num_scan_bblks - head_blk);
            new_blk = log_bbnum;
            ASSERT(start_blk <= INT_MAX &&
                (xfs_daddr_t) log_bbnum-start_blk >= 0);
            ASSERT(head_blk <= INT_MAX);
            error = xlog_find_verify_log_record(log, start_blk,
                            &new_blk, (int)head_blk);
            if (error == 1)
                error = -EIO;
            if (error)
                goto out_free_buffer;
            if (new_blk != log_bbnum)
                head_blk = new_blk;
        } else if (error)
            goto out_free_buffer;
    }

    kmem_free(buffer);
    if (head_blk == log_bbnum)
        *return_head_blk = 0;
    else
        *return_head_blk = head_blk;
    /*
     * When returning here, we have a good block number.  Bad block
     * means that during a previous crash, we didn't have a clean break
     * from cycle number N to cycle number N-1.  In this case, we need
     * to find the first block with cycle number N-1.
     */
    return 0;

out_free_buffer:
    kmem_free(buffer);
    if (error)
        xfs_warn(log->l_mp, "failed to find log head");
    return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
    struct xlog     *log,
    xfs_daddr_t     head_blk,
    xfs_daddr_t     tail_blk,
    int         count,
    char            *buffer,
    xfs_daddr_t     *rblk,
    struct xlog_rec_header  **rhead,
    bool            *wrapped)
{
    int         i;
    int         error;
    int         found = 0;
    char            *offset = NULL;
    xfs_daddr_t     end_blk;

    *wrapped = false;

    /*
     * Walk backwards from the head block until we hit the tail or the first
     * block in the log.
     */
    end_blk = head_blk > tail_blk ? tail_blk : 0;
    for (i = (int) head_blk - 1; i >= end_blk; i--) {
        error = xlog_bread(log, i, 1, buffer, &offset);
        if (error)
            goto out_error;

        if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
            *rblk = i;
            *rhead = (struct xlog_rec_header *) offset;
            if (++found == count)
                break;
        }
    }

    /*
     * If we haven't hit the tail block or the log record header count,
     * start looking again from the end of the physical log. Note that
     * callers can pass head == tail if the tail is not yet known.
     */
    if (tail_blk >= head_blk && found != count) {
        for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
            error = xlog_bread(log, i, 1, buffer, &offset);
            if (error)
                goto out_error;

            if (*(__be32 *)offset ==
                cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                *wrapped = true;
                *rblk = i;
                *rhead = (struct xlog_rec_header *) offset;
                if (++found == count)
                    break;
            }
        }
    }

    return found;

out_error:
    return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
    struct xlog     *log,
    xfs_daddr_t     head_blk,
    xfs_daddr_t     tail_blk,
    int         count,
    char            *buffer,
    xfs_daddr_t     *rblk,
    struct xlog_rec_header  **rhead,
    bool            *wrapped)
{
    int         i;
    int         error;
    int         found = 0;
    char            *offset = NULL;
    xfs_daddr_t     end_blk;

    *wrapped = false;

    /*
     * Walk forward from the tail block until we hit the head or the last
     * block in the log.
     */
    end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
    for (i = (int) tail_blk; i <= end_blk; i++) {
        error = xlog_bread(log, i, 1, buffer, &offset);
        if (error)
            goto out_error;

        if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
            *rblk = i;
            *rhead = (struct xlog_rec_header *) offset;
            if (++found == count)
                break;
        }
    }

    /*
     * If we haven't hit the head block or the log record header count,
     * start looking again from the start of the physical log.
     */
    if (tail_blk > head_blk && found != count) {
        for (i = 0; i < (int) head_blk; i++) {
            error = xlog_bread(log, i, 1, buffer, &offset);
            if (error)
                goto out_error;

            if (*(__be32 *)offset ==
                cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
                *wrapped = true;
                *rblk = i;
                *rhead = (struct xlog_rec_header *) offset;
                if (++found == count)
                    break;
            }
        }
    }

    return found;

out_error:
    return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
    struct xlog *log,
    xfs_daddr_t head_blk,
    xfs_daddr_t tail_blk)
{
    if (head_blk < tail_blk)
        return tail_blk - head_blk;

    return tail_blk + (log->l_logBBsize - head_blk);
}

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
    struct xlog     *log,
    xfs_daddr_t     head_blk,
    xfs_daddr_t     *tail_blk,
    int         hsize)
{
    struct xlog_rec_header  *thead;
    char            *buffer;
    xfs_daddr_t     first_bad;
    int         error = 0;
    bool            wrapped;
    xfs_daddr_t     tmp_tail;
    xfs_daddr_t     orig_tail = *tail_blk;

    buffer = xlog_alloc_buffer(log, 1);
    if (!buffer)
        return -ENOMEM;

    /*
     * Make sure the tail points to a record (returns positive count on
     * success).
     */
    error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
            &tmp_tail, &thead, &wrapped);
    if (error < 0)
        goto out;
    if (*tail_blk != tmp_tail)
        *tail_blk = tmp_tail;

    /*
     * Run a CRC check from the tail to the head. We can't just check
     * MAX_ICLOGS records past the tail because the tail may point to stale
     * blocks cleared during the search for the head/tail. These blocks are
     * overwritten with zero-length records and thus record count is not a
     * reliable indicator of the iclog state before a crash.
     */
    first_bad = 0;
    error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                      XLOG_RECOVER_CRCPASS, &first_bad);
    while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
        int tail_distance;

        /*
         * Is corruption within range of the head? If so, retry from
         * the next record. Otherwise return an error.
         */
        tail_distance = xlog_tail_distance(log, head_blk, first_bad);
        if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
            break;

        /* skip to the next record; returns positive count on success */
        error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
                buffer, &tmp_tail, &thead, &wrapped);
        if (error < 0)
            goto out;

        *tail_blk = tmp_tail;
        first_bad = 0;
        error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
                          XLOG_RECOVER_CRCPASS, &first_bad);
    }

    if (!error && *tail_blk != orig_tail)
        xfs_warn(log->l_mp,
        "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
             orig_tail, *tail_blk);
out:
    kmem_free(buffer);
    return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
    struct xlog     *log,
    xfs_daddr_t     *head_blk,  /* in/out: unverified head */
    xfs_daddr_t     *tail_blk,  /* out: tail block */
    char            *buffer,
    xfs_daddr_t     *rhead_blk, /* start blk of last record */
    struct xlog_rec_header  **rhead,    /* ptr to last record */
    bool            *wrapped)   /* last rec. wraps phys. log */
{
    struct xlog_rec_header  *tmp_rhead;
    char            *tmp_buffer;
    xfs_daddr_t     first_bad;
    xfs_daddr_t     tmp_rhead_blk;
    int         found;
    int         error;
    bool            tmp_wrapped;

    /*
     * Check the head of the log for torn writes. Search backwards from the
     * head until we hit the tail or the maximum number of log record I/Os
     * that could have been in flight at one time. Use a temporary buffer so
     * we don't trash the rhead/buffer pointers from the caller.
     */
    tmp_buffer = xlog_alloc_buffer(log, 1);
    if (!tmp_buffer)
        return -ENOMEM;
    error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
                      XLOG_MAX_ICLOGS, tmp_buffer,
                      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
    kmem_free(tmp_buffer);
    if (error < 0)
        return error;

    /*
     * Now run a CRC verification pass over the records starting at the
     * block found above to the current head. If a CRC failure occurs, the
     * log block of the first bad record is saved in first_bad.
     */
    error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
                      XLOG_RECOVER_CRCPASS, &first_bad);
    if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
        /*
         * We've hit a potential torn write. Reset the error and warn
         * about it.
         */
        error = 0;
        xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
             first_bad, *head_blk);

        /*
         * Get the header block and buffer pointer for the last good
         * record before the bad record.
         *
         * Note that xlog_find_tail() clears the blocks at the new head
         * (i.e., the records with invalid CRC) if the cycle number
         * matches the current cycle.
         */
        found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
                buffer, rhead_blk, rhead, wrapped);
        if (found < 0)
            return found;
        if (found == 0)     /* XXX: right thing to do here? */
            return -EIO;

        /*
         * Reset the head block to the starting block of the first bad
         * log record and set the tail block based on the last good
         * record.
         *
         * Bail out if the updated head/tail match as this indicates
         * possible corruption outside of the acceptable
         * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
         */
        *head_blk = first_bad;
        *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
        if (*head_blk == *tail_blk) {
            ASSERT(0);
            return 0;
        }
    }
    if (error)
        return error;

    return xlog_verify_tail(log, *head_blk, tail_blk,
                be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
    struct xlog     *log,
    xfs_daddr_t     bno)
{
    int         mod;

    div_s64_rem(bno, log->l_logBBsize, &mod);
    return mod;
}

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
    struct xlog     *log,
    xfs_daddr_t     *head_blk,
    xfs_daddr_t     *tail_blk,
    struct xlog_rec_header  *rhead,
    xfs_daddr_t     rhead_blk,
    char            *buffer,
    bool            *clean)
{
    struct xlog_op_header   *op_head;
    xfs_daddr_t     umount_data_blk;
    xfs_daddr_t     after_umount_blk;
    int         hblks;
    int         error;
    char            *offset;

    *clean = false;

    /*
     * Look for unmount record. If we find it, then we know there was a
     * clean unmount. Since the block after the unmount record could wrap
     * past the end of the physical log, we convert it to a log block
     * before comparing to the head_blk.
     *
     * Save the current tail lsn to pass to xlog_clear_stale_blocks()
     * below. We won't want to clear the unmount record if there is one, so
     * we pass the lsn of the unmount record rather than the block after it.
     */
    hblks = xlog_logrec_hblks(log, rhead);
    after_umount_blk = xlog_wrap_logbno(log,
            rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

    if (*head_blk == after_umount_blk &&
        be32_to_cpu(rhead->h_num_logops) == 1) {
        umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
        error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
        if (error)
            return error;

        op_head = (struct xlog_op_header *)offset;
        if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
            /*
             * Set tail and last sync so that newly written log
             * records will point recovery to after the current
             * unmount record.
             */
            xlog_assign_atomic_lsn(&log->l_tail_lsn,
                    log->l_curr_cycle, after_umount_blk);
            xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
                    log->l_curr_cycle, after_umount_blk);
            *tail_blk = after_umount_blk;

            *clean = true;
        }
    }

    return 0;
}

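/*
 * Update the in-core log state to match the given head record, bumping the
 * cycle number when the head has wrapped back to block zero.
 */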
static void
xlog_set_state(
    struct xlog     *log,
    xfs_daddr_t     head_blk,
    struct xlog_rec_header  *rhead,
    xfs_daddr_t     rhead_blk,
    bool            bump_cycle)
{
    /*
     * Reset log values according to the state of the log when we
     * crashed.  In the case where head_blk == 0, we bump curr_cycle
     * by one because the next write starts a new cycle rather than
     * continuing the cycle of the last good log record.  At this
     * point we have guaranteed that all partial log records have been
     * accounted for.  Therefore, we know that the last good log record
     * written was complete and ended exactly on the end boundary
     * of the physical log.
     */
    log->l_prev_block = rhead_blk;
    log->l_curr_block = (int)head_blk;
    log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
    if (bump_cycle)
        log->l_curr_cycle++;
    atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
    atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
    xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
                    BBTOB(log->l_curr_block));
    xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
                    BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
    struct xlog     *log,
    xfs_daddr_t     *head_blk,
    xfs_daddr_t     *tail_blk)
{
    xlog_rec_header_t   *rhead;
    char            *offset = NULL;
    char            *buffer;
    int         error;
    xfs_daddr_t     rhead_blk;
    xfs_lsn_t       tail_lsn;
    bool            wrapped = false;
    bool            clean = false;

    /*
     * Find previous log record
     */
    if ((error = xlog_find_head(log, head_blk)))
        return error;
    ASSERT(*head_blk < INT_MAX);

    buffer = xlog_alloc_buffer(log, 1);
    if (!buffer)
        return -ENOMEM;
    if (*head_blk == 0) {               /* special case */
        error = xlog_bread(log, 0, 1, buffer, &offset);
        if (error)
            goto done;

        if (xlog_get_cycle(offset) == 0) {
            *tail_blk = 0;
            /* leave all other log inited values alone */
            goto done;
        }
    }

    /*
     * Search backwards through the log looking for the log record header
     * block. This wraps all the way back around to the head so something is
     * seriously wrong if we can't find it.
     */
    error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
                      &rhead_blk, &rhead, &wrapped);
    if (error < 0)
        goto done;
    if (!error) {
        xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
        error = -EFSCORRUPTED;
        goto done;
    }
    *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

    /*
     * Set the log state based on the current head record.
     */
    xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
    tail_lsn = atomic64_read(&log->l_tail_lsn);

    /*
     * Look for an unmount record at the head of the log. This sets the log
     * state to determine whether recovery is necessary.
     */
    error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
                       rhead_blk, buffer, &clean);
    if (error)
        goto done;

    /*
     * Verify the log head if the log is not clean (e.g., we have anything
     * but an unmount record at the head). This uses CRC verification to
     * detect and trim torn writes. If discovered, CRC failures are
     * considered torn writes and the log head is trimmed accordingly.
     *
     * Note that we can only run CRC verification when the log is dirty
     * because there's no guarantee that the log data behind an unmount
     * record is compatible with the current architecture.
     */
    if (!clean) {
        xfs_daddr_t orig_head = *head_blk;

        error = xlog_verify_head(log, head_blk, tail_blk, buffer,
                     &rhead_blk, &rhead, &wrapped);
        if (error)
            goto done;

        /* update in-core state again if the head changed */
        if (*head_blk != orig_head) {
            xlog_set_state(log, *head_blk, rhead, rhead_blk,
                       wrapped);
            tail_lsn = atomic64_read(&log->l_tail_lsn);
            error = xlog_check_unmount_rec(log, head_blk, tail_blk,
                               rhead, rhead_blk, buffer,
                               &clean);
            if (error)
                goto done;
        }
    }

    /*
     * Note that the unmount was clean. If the unmount was not clean, we
     * need to know this to rebuild the superblock counters from the perag
     * headers if we have a filesystem using non-persistent counters.
     */
    if (clean)
        set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);

    /*
     * Make sure that there are no blocks in front of the head
     * with the same cycle number as the head.  This can happen
     * because we allow multiple outstanding log writes concurrently,
     * and the later writes might make it out before earlier ones.
     *
     * We use the lsn from before modifying it so that we'll never
     * overwrite the unmount record after a clean unmount.
     *
     * Do this only if we are going to recover the filesystem
     *
     * NOTE: This used to say "if (!readonly)"
     * However on Linux, we can & do recover a read-only filesystem.
     * We only skip recovery if NORECOVERY is specified on mount,
     * in which case we would not be here.
     *
     * But... if the -device- itself is readonly, just skip this.
     * We can't recover this device anyway, so it won't matter.
     */
    if (!xfs_readonly_buftarg(log->l_targ))
        error = xlog_clear_stale_blocks(log, tail_lsn);

done:
    kmem_free(buffer);

    if (error)
        xfs_warn(log->l_mp, "failed to locate log tail");
    return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *  0  => the log is completely written to
 *  1  => use *blk_no as the first block of the log
 * <0  => error has occurred
 */
STATIC int
xlog_find_zeroed(
    struct xlog *log,
    xfs_daddr_t *blk_no)
{
    char        *buffer;
    char        *offset;
    uint            first_cycle, last_cycle;
    xfs_daddr_t new_blk, last_blk, start_blk;
    xfs_daddr_t     num_scan_bblks;
    int         error, log_bbnum = log->l_logBBsize;

    *blk_no = 0;

    /* check totally zeroed log */
    buffer = xlog_alloc_buffer(log, 1);
    if (!buffer)
        return -ENOMEM;
    error = xlog_bread(log, 0, 1, buffer, &offset);
    if (error)
        goto out_free_buffer;

    first_cycle = xlog_get_cycle(offset);
    if (first_cycle == 0) {     /* completely zeroed log */
        *blk_no = 0;
        kmem_free(buffer);
        return 1;
    }

    /* check partially zeroed log */
    error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
    if (error)
        goto out_free_buffer;

    last_cycle = xlog_get_cycle(offset);
    if (last_cycle != 0) {      /* log completely written to */
        kmem_free(buffer);
        return 0;
    }

    /* we have a partially zeroed log */
    last_blk = log_bbnum-1;
    error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
    if (error)
        goto out_free_buffer;

    /*
     * Validate the answer.  Because there is no way to guarantee that
     * the entire log is made up of log records which are the same size,
     * we scan over the defined maximum blocks.  At this point, the maximum
     * is not chosen to mean anything special.   XXXmiken
     */
    num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
    ASSERT(num_scan_bblks <= INT_MAX);

    if (last_blk < num_scan_bblks)
        num_scan_bblks = last_blk;
    start_blk = last_blk - num_scan_bblks;

    /*
     * We search for any instances of cycle number 0 that occur before
     * our current estimate of the head.  What we're trying to detect is
     *        1 ... | 0 | 1 | 0...
     *                       ^ binary search ends here
     */
    if ((error = xlog_find_verify_cycle(log, start_blk,
                     (int)num_scan_bblks, 0, &new_blk)))
        goto out_free_buffer;
    if (new_blk != -1)
        last_blk = new_blk;

    /*
     * Potentially backup over partial log record write.  We don't need
     * to search the end of the log because we know it is zero.
     */
    error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
    if (error == 1)
        error = -EIO;
    if (error)
        goto out_free_buffer;

    *blk_no = last_blk;
out_free_buffer:
    kmem_free(buffer);
    if (error)
        return error;
    return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
    struct xlog     *log,
    char            *buf,
    int         cycle,
    int         block,
    int         tail_cycle,
    int         tail_block)
{
    xlog_rec_header_t   *recp = (xlog_rec_header_t *)buf;

    memset(buf, 0, BBSIZE);
    recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
    recp->h_cycle = cpu_to_be32(cycle);
    recp->h_version = cpu_to_be32(
            xfs_has_logv2(log->l_mp) ? 2 : 1);
    recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
    recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
    recp->h_fmt = cpu_to_be32(XLOG_FMT);
    memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

1507 STATIC int
1508 xlog_write_log_records(
1509     struct xlog *log,
1510     int     cycle,
1511     int     start_block,
1512     int     blocks,
1513     int     tail_cycle,
1514     int     tail_block)
1515 {
1516     char        *offset;
1517     char        *buffer;
1518     int     balign, ealign;
1519     int     sectbb = log->l_sectBBsize;
1520     int     end_block = start_block + blocks;
1521     int     bufblks;
1522     int     error = 0;
1523     int     i, j = 0;
1524 
1525     /*
1526      * Greedily allocate a buffer big enough to handle the full
1527      * range of basic blocks to be written.  If that fails, try
1528      * a smaller size.  We need to be able to write at least a
1529      * log sector, or we're out of luck.
1530      */
1531     bufblks = 1 << ffs(blocks);
1532     while (bufblks > log->l_logBBsize)
1533         bufblks >>= 1;
1534     while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1535         bufblks >>= 1;
1536         if (bufblks < sectbb)
1537             return -ENOMEM;
1538     }
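         /*
          * Editor's note: ffs() returns the 1-based index of the lowest
          * set bit, so for blocks = 24 (0b11000) the first attempt is a
          * 1 << 4 = 16 block buffer; each allocation failure halves the
          * size, and we give up only once it drops below one log sector.
          */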
1539 
1540     /* We may need to do a read at the start to fill in part of
1541      * the buffer in the starting sector not covered by the first
1542      * write below.
1543      */
1544     balign = round_down(start_block, sectbb);
1545     if (balign != start_block) {
1546         error = xlog_bread_noalign(log, start_block, 1, buffer);
1547         if (error)
1548             goto out_free_buffer;
1549 
1550         j = start_block - balign;
1551     }
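         /*
          * Editor's note: for example, with a sector size of 8 basic
          * blocks and start_block = 1003, balign = 1000 and j = 3: the
          * contents of blocks 1000-1002 are preserved from disk and the
          * loop below stamps its first record header at block 1003.
          */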
1552 
1553     for (i = start_block; i < end_block; i += bufblks) {
1554         int     bcount, endcount;
1555 
1556         bcount = min(bufblks, end_block - start_block);
1557         endcount = bcount - j;
1558 
1559         /* We may need to do a read at the end to fill in part of
1560          * the buffer in the final sector not covered by the write.
1561          * If this is the same sector as the above read, skip it.
1562          */
1563         ealign = round_down(end_block, sectbb);
1564         if (j == 0 && (start_block + endcount > ealign)) {
1565             error = xlog_bread_noalign(log, ealign, sectbb,
1566                     buffer + BBTOB(ealign - start_block));
1567             if (error)
1568                 break;
1569 
1570         }
1571 
1572         offset = buffer + xlog_align(log, start_block);
1573         for (; j < endcount; j++) {
1574             xlog_add_record(log, offset, cycle, i+j,
1575                     tail_cycle, tail_block);
1576             offset += BBSIZE;
1577         }
1578         error = xlog_bwrite(log, start_block, endcount, buffer);
1579         if (error)
1580             break;
1581         start_block += endcount;
1582         j = 0;
1583     }
1584 
1585 out_free_buffer:
1586     kmem_free(buffer);
1587     return error;
1588 }
1589 
1590 /*
1591  * This routine is called to blow away any incomplete log writes out
1592  * in front of the log head.  We do this so that we won't become confused
1593  * if we come up, write only a little bit more, and then crash again.
1594  * If we leave the partial log records out there, this situation could
1595  * cause us to think those partial writes are valid blocks since they
1596  * have the current cycle number.  We get rid of them by overwriting them
1597  * with empty log records with the old cycle number rather than the
1598  * current one.
1599  *
1600  * The tail lsn is passed in rather than taken from
1601  * the log so that we will not write over the unmount record after a
1602  * clean unmount in a 512 block log.  Doing so would leave the log without
1603  * any valid log records in it until a new one was written.  If we crashed
1604  * during that time we would not be able to recover.
1605  */
1606 STATIC int
1607 xlog_clear_stale_blocks(
1608     struct xlog *log,
1609     xfs_lsn_t   tail_lsn)
1610 {
1611     int     tail_cycle, head_cycle;
1612     int     tail_block, head_block;
1613     int     tail_distance, max_distance;
1614     int     distance;
1615     int     error;
1616 
1617     tail_cycle = CYCLE_LSN(tail_lsn);
1618     tail_block = BLOCK_LSN(tail_lsn);
1619     head_cycle = log->l_curr_cycle;
1620     head_block = log->l_curr_block;
1621 
1622     /*
1623      * Figure out the distance between the new head of the log
1624      * and the tail.  We want to write over any blocks beyond the
1625      * head that we may have written just before the crash, but
1626      * we don't want to overwrite the tail of the log.
1627      */
1628     if (head_cycle == tail_cycle) {
1629         /*
1630          * The tail is behind the head in the physical log,
1631          * so the distance from the head to the tail is the
1632          * distance from the head to the end of the log plus
1633          * the distance from the beginning of the log to the
1634          * tail.
1635          */
1636         if (XFS_IS_CORRUPT(log->l_mp,
1637                    head_block < tail_block ||
1638                    head_block >= log->l_logBBsize))
1639             return -EFSCORRUPTED;
1640         tail_distance = tail_block + (log->l_logBBsize - head_block);
1641     } else {
1642         /*
1643          * The head is behind the tail in the physical log,
1644          * so the distance from the head to the tail is just
1645          * the tail block minus the head block.
1646          */
1647         if (XFS_IS_CORRUPT(log->l_mp,
1648                    head_block >= tail_block ||
1649                    head_cycle != tail_cycle + 1))
1650             return -EFSCORRUPTED;
1651         tail_distance = tail_block - head_block;
1652     }
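         /*
          * Editor's note: e.g. in a 1000-block log, head_block = 900 and
          * tail_block = 100 on the same cycle gives
          * tail_distance = 100 + (1000 - 900) = 200; with the head one
          * cycle ahead, head_block = 50 and tail_block = 100 gives
          * tail_distance = 100 - 50 = 50.
          */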
1653 
1654     /*
1655      * If the head is right up against the tail, we can't clear
1656      * anything.
1657      */
1658     if (tail_distance <= 0) {
1659         ASSERT(tail_distance == 0);
1660         return 0;
1661     }
1662 
1663     max_distance = XLOG_TOTAL_REC_SHIFT(log);
1664     /*
1665      * Take the smaller of the maximum amount of outstanding I/O
1666      * we could have and the distance to the tail to clear out.
1667      * We take the smaller so that we don't overwrite the tail and
1668      * we don't waste all day writing from the head to the tail
1669      * for no reason.
1670      */
1671     max_distance = min(max_distance, tail_distance);
1672 
1673     if ((head_block + max_distance) <= log->l_logBBsize) {
1674         /*
1675          * We can stomp all the blocks we need to without
1676          * wrapping around the end of the log.  Just do it
1677          * in a single write.  Use the cycle number of the
1678          * current cycle minus one so that the log will look like:
1679          *     n ... | n - 1 ...
1680          */
1681         error = xlog_write_log_records(log, (head_cycle - 1),
1682                 head_block, max_distance, tail_cycle,
1683                 tail_block);
1684         if (error)
1685             return error;
1686     } else {
1687         /*
1688          * We need to wrap around the end of the physical log in
1689          * order to clear all the blocks.  Do it in two separate
1690          * I/Os.  The first write should be from the head to the
1691          * end of the physical log, and it should use the current
1692          * cycle number minus one just like above.
1693          */
1694         distance = log->l_logBBsize - head_block;
1695         error = xlog_write_log_records(log, (head_cycle - 1),
1696                 head_block, distance, tail_cycle,
1697                 tail_block);
1698 
1699         if (error)
1700             return error;
1701 
1702         /*
1703          * Now write the blocks at the start of the physical log.
1704          * This writes the remainder of the blocks we want to clear.
1705          * It uses the current cycle number since we're now on the
1706          * same cycle as the head so that we get:
1707          *    n ... n ... | n - 1 ...
1708          *    ^^^^^ blocks we're writing
1709          */
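             /*
              * Editor's note: e.g. with a 1000-block log, head_block = 900
              * and max_distance = 300, the first call above stamped blocks
              * 900-999 with cycle n - 1, and the call below stamps blocks
              * 0-199 with cycle n.
              */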
1710         distance = max_distance - (log->l_logBBsize - head_block);
1711         error = xlog_write_log_records(log, head_cycle, 0, distance,
1712                 tail_cycle, tail_block);
1713         if (error)
1714             return error;
1715     }
1716 
1717     return 0;
1718 }
1719 
1720 /*
1721  * Release the recovered intent item in the AIL that matches the given intent
1722  * type and intent id.
1723  */
1724 void
1725 xlog_recover_release_intent(
1726     struct xlog     *log,
1727     unsigned short      intent_type,
1728     uint64_t        intent_id)
1729 {
1730     struct xfs_ail_cursor   cur;
1731     struct xfs_log_item *lip;
1732     struct xfs_ail      *ailp = log->l_ailp;
1733 
1734     spin_lock(&ailp->ail_lock);
1735     for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1736          lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1737         if (lip->li_type != intent_type)
1738             continue;
1739         if (!lip->li_ops->iop_match(lip, intent_id))
1740             continue;
1741 
1742         spin_unlock(&ailp->ail_lock);
1743         lip->li_ops->iop_release(lip);
1744         spin_lock(&ailp->ail_lock);
1745         break;
1746     }
1747 
1748     xfs_trans_ail_cursor_done(&cur);
1749     spin_unlock(&ailp->ail_lock);
1750 }
1751 
1752 int
1753 xlog_recover_iget(
1754     struct xfs_mount    *mp,
1755     xfs_ino_t       ino,
1756     struct xfs_inode    **ipp)
1757 {
1758     int         error;
1759 
1760     error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
1761     if (error)
1762         return error;
1763 
1764     error = xfs_qm_dqattach(*ipp);
1765     if (error) {
1766         xfs_irele(*ipp);
1767         return error;
1768     }
1769 
1770     if (VFS_I(*ipp)->i_nlink == 0)
1771         xfs_iflags_set(*ipp, XFS_IRECOVERY);
1772 
1773     return 0;
1774 }
1775 
1776 /******************************************************************************
1777  *
1778  *      Log recover routines
1779  *
1780  ******************************************************************************
1781  */
1782 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1783     &xlog_buf_item_ops,
1784     &xlog_inode_item_ops,
1785     &xlog_dquot_item_ops,
1786     &xlog_quotaoff_item_ops,
1787     &xlog_icreate_item_ops,
1788     &xlog_efi_item_ops,
1789     &xlog_efd_item_ops,
1790     &xlog_rui_item_ops,
1791     &xlog_rud_item_ops,
1792     &xlog_cui_item_ops,
1793     &xlog_cud_item_ops,
1794     &xlog_bui_item_ops,
1795     &xlog_bud_item_ops,
1796     &xlog_attri_item_ops,
1797     &xlog_attrd_item_ops,
1798 };
1799 
1800 static const struct xlog_recover_item_ops *
1801 xlog_find_item_ops(
1802     struct xlog_recover_item        *item)
1803 {
1804     unsigned int                i;
1805 
1806     for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1807         if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1808             return xlog_recover_item_ops[i];
1809 
1810     return NULL;
1811 }
1812 
1813 /*
1814  * Sort the log items in the transaction.
1815  *
1816  * The ordering constraints are defined by the inode allocation and unlink
1817  * behaviour. The rules are:
1818  *
1819  *  1. Every item is only logged once in a given transaction. Hence it
1820  *     represents the last logged state of the item. Replay ordering is
1821  *     therefore dependent on the order in which operations need to be
1822  *     performed so that the required initial conditions are always met.
1823  *
1824  *  2. Cancelled buffers are recorded in pass 1 in a separate table and
1825  *     there's nothing to replay from them so we can simply cull them
1826  *     from the transaction. However, we can't do that until after we've
1827  *     replayed all the other items because they may be dependent on the
1828  *     cancelled buffer and replaying the cancelled buffer can remove it
1829  *     from the cancelled buffer table. Hence they have to be done last.
1830  *
1831  *  3. Inode allocation buffers must be replayed before inode items that
1832  *     read the buffer and replay changes into it. For filesystems using the
1833  *     ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1834  *     treated the same as inode allocation buffers as they create and
1835  *     initialise the buffers directly.
1836  *
1837  *  4. Inode unlink buffers must be replayed after inode items are replayed.
1838  *     This ensures that inodes are completely flushed to the inode buffer
1839  *     in a "free" state before we remove the unlinked inode list pointer.
1840  *
1841  * Hence the ordering needs to be inode allocation buffers first, inode items
1842  * second, inode unlink buffers third and cancelled buffers last.
1843  *
1844  * But there's a problem with that - we can't tell an inode allocation buffer
1845  * apart from a regular buffer, so we can't separate them. We can, however,
1846  * tell an inode unlink buffer from the others, and so we can separate them out
1847  * from all the other buffers and move them to last.
1848  *
1849  * Hence, 4 lists, in order from head to tail:
1850  *  - buffer_list for all buffers except cancelled/inode unlink buffers
1851  *  - item_list for all non-buffer items
1852  *  - inode_buffer_list for inode unlink buffers
1853  *  - cancel_list for the cancelled buffers
1854  *
1855  * Note that we add objects to the tail of the lists so that first-to-last
1856  * ordering is preserved within the lists. Adding objects to the head of the
1857  * list means when we traverse from the head we walk them in last-to-first
1858  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1859  * but for all other items there may be specific ordering that we need to
1860  * preserve.
1861  */
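 /*
  * Editor's note: the function below realises this ordering by sorting the
  * items into the four lists and then splicing buffer_list back at the head
  * of trans->r_itemq and the other three lists, in order, at the tail.
  */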
1862 STATIC int
1863 xlog_recover_reorder_trans(
1864     struct xlog     *log,
1865     struct xlog_recover *trans,
1866     int         pass)
1867 {
1868     struct xlog_recover_item *item, *n;
1869     int         error = 0;
1870     LIST_HEAD(sort_list);
1871     LIST_HEAD(cancel_list);
1872     LIST_HEAD(buffer_list);
1873     LIST_HEAD(inode_buffer_list);
1874     LIST_HEAD(item_list);
1875 
1876     list_splice_init(&trans->r_itemq, &sort_list);
1877     list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1878         enum xlog_recover_reorder   fate = XLOG_REORDER_ITEM_LIST;
1879 
1880         item->ri_ops = xlog_find_item_ops(item);
1881         if (!item->ri_ops) {
1882             xfs_warn(log->l_mp,
1883                 "%s: unrecognized type of log operation (%d)",
1884                 __func__, ITEM_TYPE(item));
1885             ASSERT(0);
1886             /*
1887              * Return the remaining items to the transaction item
1888              * list so they can be freed in the caller.
1889              */
1890             if (!list_empty(&sort_list))
1891                 list_splice_init(&sort_list, &trans->r_itemq);
1892             error = -EFSCORRUPTED;
1893             break;
1894         }
1895 
1896         if (item->ri_ops->reorder)
1897             fate = item->ri_ops->reorder(item);
1898 
1899         switch (fate) {
1900         case XLOG_REORDER_BUFFER_LIST:
1901             list_move_tail(&item->ri_list, &buffer_list);
1902             break;
1903         case XLOG_REORDER_CANCEL_LIST:
1904             trace_xfs_log_recover_item_reorder_head(log,
1905                     trans, item, pass);
1906             list_move(&item->ri_list, &cancel_list);
1907             break;
1908         case XLOG_REORDER_INODE_BUFFER_LIST:
1909             list_move(&item->ri_list, &inode_buffer_list);
1910             break;
1911         case XLOG_REORDER_ITEM_LIST:
1912             trace_xfs_log_recover_item_reorder_tail(log,
1913                             trans, item, pass);
1914             list_move_tail(&item->ri_list, &item_list);
1915             break;
1916         }
1917     }
1918 
1919     ASSERT(list_empty(&sort_list));
1920     if (!list_empty(&buffer_list))
1921         list_splice(&buffer_list, &trans->r_itemq);
1922     if (!list_empty(&item_list))
1923         list_splice_tail(&item_list, &trans->r_itemq);
1924     if (!list_empty(&inode_buffer_list))
1925         list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1926     if (!list_empty(&cancel_list))
1927         list_splice_tail(&cancel_list, &trans->r_itemq);
1928     return error;
1929 }
1930 
1931 void
1932 xlog_buf_readahead(
1933     struct xlog     *log,
1934     xfs_daddr_t     blkno,
1935     uint            len,
1936     const struct xfs_buf_ops *ops)
1937 {
1938     if (!xlog_is_buffer_cancelled(log, blkno, len))
1939         xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1940 }
1941 
1942 STATIC int
1943 xlog_recover_items_pass2(
1944     struct xlog                     *log,
1945     struct xlog_recover             *trans,
1946     struct list_head                *buffer_list,
1947     struct list_head                *item_list)
1948 {
1949     struct xlog_recover_item    *item;
1950     int             error = 0;
1951 
1952     list_for_each_entry(item, item_list, ri_list) {
1953         trace_xfs_log_recover_item_recover(log, trans, item,
1954                 XLOG_RECOVER_PASS2);
1955 
1956         if (item->ri_ops->commit_pass2)
1957             error = item->ri_ops->commit_pass2(log, buffer_list,
1958                     item, trans->r_lsn);
1959         if (error)
1960             return error;
1961     }
1962 
1963     return error;
1964 }
1965 
1966 /*
1967  * Perform the transaction.
1968  *
1969  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
1970  * EFIs and EFDs get queued up by adding entries into the AIL for them.
1971  */
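 /*
  * Editor's note: in pass 2 the loop below batches up to
  * XLOG_RECOVER_COMMIT_QUEUE_MAX items on ra_list, issuing buffer
  * readahead as each item is queued (where the item provides an ra_pass2
  * hook), so the I/O is already in flight by the time
  * xlog_recover_items_pass2() replays the batch.
  */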
1972 STATIC int
1973 xlog_recover_commit_trans(
1974     struct xlog     *log,
1975     struct xlog_recover *trans,
1976     int         pass,
1977     struct list_head    *buffer_list)
1978 {
1979     int             error = 0;
1980     int             items_queued = 0;
1981     struct xlog_recover_item    *item;
1982     struct xlog_recover_item    *next;
1983     LIST_HEAD           (ra_list);
1984     LIST_HEAD           (done_list);
1985 
1986     #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
1987 
1988     hlist_del_init(&trans->r_list);
1989 
1990     error = xlog_recover_reorder_trans(log, trans, pass);
1991     if (error)
1992         return error;
1993 
1994     list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
1995         trace_xfs_log_recover_item_recover(log, trans, item, pass);
1996 
1997         switch (pass) {
1998         case XLOG_RECOVER_PASS1:
1999             if (item->ri_ops->commit_pass1)
2000                 error = item->ri_ops->commit_pass1(log, item);
2001             break;
2002         case XLOG_RECOVER_PASS2:
2003             if (item->ri_ops->ra_pass2)
2004                 item->ri_ops->ra_pass2(log, item);
2005             list_move_tail(&item->ri_list, &ra_list);
2006             items_queued++;
2007             if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
2008                 error = xlog_recover_items_pass2(log, trans,
2009                         buffer_list, &ra_list);
2010                 list_splice_tail_init(&ra_list, &done_list);
2011                 items_queued = 0;
2012             }
2013 
2014             break;
2015         default:
2016             ASSERT(0);
2017         }
2018 
2019         if (error)
2020             goto out;
2021     }
2022 
2023 out:
2024     if (!list_empty(&ra_list)) {
2025         if (!error)
2026             error = xlog_recover_items_pass2(log, trans,
2027                     buffer_list, &ra_list);
2028         list_splice_tail_init(&ra_list, &done_list);
2029     }
2030 
2031     if (!list_empty(&done_list))
2032         list_splice_init(&done_list, &trans->r_itemq);
2033 
2034     return error;
2035 }
2036 
2037 STATIC void
2038 xlog_recover_add_item(
2039     struct list_head    *head)
2040 {
2041     struct xlog_recover_item *item;
2042 
2043     item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2044     INIT_LIST_HEAD(&item->ri_list);
2045     list_add_tail(&item->ri_list, head);
2046 }
2047 
2048 STATIC int
2049 xlog_recover_add_to_cont_trans(
2050     struct xlog     *log,
2051     struct xlog_recover *trans,
2052     char            *dp,
2053     int         len)
2054 {
2055     struct xlog_recover_item *item;
2056     char            *ptr, *old_ptr;
2057     int         old_len;
2058 
2059     /*
2060      * If the transaction is empty, the header was split across this and the
2061      * previous record. Copy the rest of the header.
2062      */
2063     if (list_empty(&trans->r_itemq)) {
2064         ASSERT(len <= sizeof(struct xfs_trans_header));
2065         if (len > sizeof(struct xfs_trans_header)) {
2066             xfs_warn(log->l_mp, "%s: bad header length", __func__);
2067             return -EFSCORRUPTED;
2068         }
2069 
2070         xlog_recover_add_item(&trans->r_itemq);
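             /*
              * Editor's note: xlog_recover_add_to_trans() copied the
              * first sizeof(header) - len bytes of the split header to
              * the start of r_theader, so the arithmetic below lands the
              * remaining len bytes directly behind them.
              */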
2071         ptr = (char *)&trans->r_theader +
2072                 sizeof(struct xfs_trans_header) - len;
2073         memcpy(ptr, dp, len);
2074         return 0;
2075     }
2076 
2077     /* take the tail entry */
2078     item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2079               ri_list);
2080 
2081     old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2082     old_len = item->ri_buf[item->ri_cnt-1].i_len;
2083 
2084     ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
2085     if (!ptr)
2086         return -ENOMEM;
2087     memcpy(&ptr[old_len], dp, len);
2088     item->ri_buf[item->ri_cnt-1].i_len += len;
2089     item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2090     trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2091     return 0;
2092 }
2093 
2094 /*
2095  * The next region to add is the start of a new region.  It could be
2096  * a whole region or just the first part of one.  Because
2097  * of this, the assumption here is that the type and size fields of all
2098  * format structures fit into the first 32 bits of the structure.
2099  *
2100  * This works because all regions must be 32 bit aligned.  Therefore, we
2101  * either have both fields or we have neither field.  In the case where we
2102  * have neither, the data part of the region is zero length.  We only have
2103  * a log_op_header and can throw away the header since a new one will appear
2104  * later.  If we have at least 4 bytes, then we can determine how many regions
2105  * will appear in the current log item.
2106  */
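 /*
  * Editor's note: struct xfs_inode_log_format is used below purely as a
  * convenient overlay for those first 32 bits; its leading 16-bit type and
  * 16-bit ilf_size members occupy the same positions in every log format
  * structure, which is why "any will do".
  */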
2107 STATIC int
2108 xlog_recover_add_to_trans(
2109     struct xlog     *log,
2110     struct xlog_recover *trans,
2111     char            *dp,
2112     int         len)
2113 {
2114     struct xfs_inode_log_format *in_f;          /* any will do */
2115     struct xlog_recover_item *item;
2116     char            *ptr;
2117 
2118     if (!len)
2119         return 0;
2120     if (list_empty(&trans->r_itemq)) {
2121         /* we need to catch log corruptions here */
2122         if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2123             xfs_warn(log->l_mp, "%s: bad header magic number",
2124                 __func__);
2125             ASSERT(0);
2126             return -EFSCORRUPTED;
2127         }
2128 
2129         if (len > sizeof(struct xfs_trans_header)) {
2130             xfs_warn(log->l_mp, "%s: bad header length", __func__);
2131             ASSERT(0);
2132             return -EFSCORRUPTED;
2133         }
2134 
2135         /*
2136          * The transaction header can be arbitrarily split across op
2137          * records. If we don't have the whole thing here, copy what we
2138          * do have and handle the rest in the next record.
2139          */
2140         if (len == sizeof(struct xfs_trans_header))
2141             xlog_recover_add_item(&trans->r_itemq);
2142         memcpy(&trans->r_theader, dp, len);
2143         return 0;
2144     }
2145 
2146     ptr = kmem_alloc(len, 0);
2147     memcpy(ptr, dp, len);
2148     in_f = (struct xfs_inode_log_format *)ptr;
2149 
2150     /* take the tail entry */
2151     item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2152               ri_list);
2153     if (item->ri_total != 0 &&
2154          item->ri_total == item->ri_cnt) {
2155         /* tail item is in use, get a new one */
2156         xlog_recover_add_item(&trans->r_itemq);
2157         item = list_entry(trans->r_itemq.prev,
2158                     struct xlog_recover_item, ri_list);
2159     }
2160 
2161     if (item->ri_total == 0) {      /* first region to be added */
2162         if (in_f->ilf_size == 0 ||
2163             in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2164             xfs_warn(log->l_mp,
2165         "bad number of regions (%d) in inode log format",
2166                   in_f->ilf_size);
2167             ASSERT(0);
2168             kmem_free(ptr);
2169             return -EFSCORRUPTED;
2170         }
2171 
2172         item->ri_total = in_f->ilf_size;
2173         item->ri_buf =
2174             kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2175                     0);
2176     }
2177 
2178     if (item->ri_total <= item->ri_cnt) {
2179         xfs_warn(log->l_mp,
2180     "log item region count (%d) overflowed size (%d)",
2181                 item->ri_cnt, item->ri_total);
2182         ASSERT(0);
2183         kmem_free(ptr);
2184         return -EFSCORRUPTED;
2185     }
2186 
2187     /* Description region is ri_buf[0] */
2188     item->ri_buf[item->ri_cnt].i_addr = ptr;
2189     item->ri_buf[item->ri_cnt].i_len  = len;
2190     item->ri_cnt++;
2191     trace_xfs_log_recover_item_add(log, trans, item, 0);
2192     return 0;
2193 }
2194 
2195 /*
2196  * Free up any resources allocated by the transaction
2197  *
2198  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2199  */
2200 STATIC void
2201 xlog_recover_free_trans(
2202     struct xlog_recover *trans)
2203 {
2204     struct xlog_recover_item *item, *n;
2205     int         i;
2206 
2207     hlist_del_init(&trans->r_list);
2208 
2209     list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2210         /* Free the regions in the item. */
2211         list_del(&item->ri_list);
2212         for (i = 0; i < item->ri_cnt; i++)
2213             kmem_free(item->ri_buf[i].i_addr);
2214         /* Free the item itself */
2215         kmem_free(item->ri_buf);
2216         kmem_free(item);
2217     }
2218     /* Free the transaction recover structure */
2219     kmem_free(trans);
2220 }
2221 
2222 /*
2223  * On error or completion, trans is freed.
2224  */
2225 STATIC int
2226 xlog_recovery_process_trans(
2227     struct xlog     *log,
2228     struct xlog_recover *trans,
2229     char            *dp,
2230     unsigned int        len,
2231     unsigned int        flags,
2232     int         pass,
2233     struct list_head    *buffer_list)
2234 {
2235     int         error = 0;
2236     bool            freeit = false;
2237 
2238     /* mask off ophdr transaction container flags */
2239     flags &= ~XLOG_END_TRANS;
2240     if (flags & XLOG_WAS_CONT_TRANS)
2241         flags &= ~XLOG_CONTINUE_TRANS;
2242 
2243     /*
2244      * Callees must not free the trans structure. We'll decide if we need to
2245      * free it or not based on the operation being done and its result.
2246      */
2247     switch (flags) {
2248     /* expected flag values */
2249     case 0:
2250     case XLOG_CONTINUE_TRANS:
2251         error = xlog_recover_add_to_trans(log, trans, dp, len);
2252         break;
2253     case XLOG_WAS_CONT_TRANS:
2254         error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2255         break;
2256     case XLOG_COMMIT_TRANS:
2257         error = xlog_recover_commit_trans(log, trans, pass,
2258                           buffer_list);
2259         /* success or fail, we are now done with this transaction. */
2260         freeit = true;
2261         break;
2262 
2263     /* unexpected flag values */
2264     case XLOG_UNMOUNT_TRANS:
2265         /* just skip trans */
2266         xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2267         freeit = true;
2268         break;
2269     case XLOG_START_TRANS:
2270     default:
2271         xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2272         ASSERT(0);
2273         error = -EFSCORRUPTED;
2274         break;
2275     }
2276     if (error || freeit)
2277         xlog_recover_free_trans(trans);
2278     return error;
2279 }
2280 
2281 /*
2282  * Lookup the transaction recovery structure associated with the ID in the
2283  * current ophdr. If the transaction doesn't exist and the start flag is set in
2284  * the ophdr, then allocate a new transaction for future ID matches to find.
2285  * Either way, return what we found during the lookup - an existing transaction
2286  * or nothing.
2287  */
2288 STATIC struct xlog_recover *
2289 xlog_recover_ophdr_to_trans(
2290     struct hlist_head   rhash[],
2291     struct xlog_rec_header  *rhead,
2292     struct xlog_op_header   *ohead)
2293 {
2294     struct xlog_recover *trans;
2295     xlog_tid_t      tid;
2296     struct hlist_head   *rhp;
2297 
2298     tid = be32_to_cpu(ohead->oh_tid);
2299     rhp = &rhash[XLOG_RHASH(tid)];
2300     hlist_for_each_entry(trans, rhp, r_list) {
2301         if (trans->r_log_tid == tid)
2302             return trans;
2303     }
2304 
2305     /*
2306      * skip over non-start transaction headers - we could be
2307      * processing slack space before the next transaction starts
2308      */
2309     if (!(ohead->oh_flags & XLOG_START_TRANS))
2310         return NULL;
2311 
2312     ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2313 
2314     /*
2315      * This is a new transaction so allocate a new recovery container to
2316      * hold the recovery ops that will follow.
2317      */
2318     trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2319     trans->r_log_tid = tid;
2320     trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2321     INIT_LIST_HEAD(&trans->r_itemq);
2322     INIT_HLIST_NODE(&trans->r_list);
2323     hlist_add_head(&trans->r_list, rhp);
2324 
2325     /*
2326      * Nothing more to do for this ophdr. Items to be added to this new
2327      * transaction will be in subsequent ophdr containers.
2328      */
2329     return NULL;
2330 }
2331 
2332 STATIC int
2333 xlog_recover_process_ophdr(
2334     struct xlog     *log,
2335     struct hlist_head   rhash[],
2336     struct xlog_rec_header  *rhead,
2337     struct xlog_op_header   *ohead,
2338     char            *dp,
2339     char            *end,
2340     int         pass,
2341     struct list_head    *buffer_list)
2342 {
2343     struct xlog_recover *trans;
2344     unsigned int        len;
2345     int         error;
2346 
2347     /* Do we understand who wrote this op? */
2348     if (ohead->oh_clientid != XFS_TRANSACTION &&
2349         ohead->oh_clientid != XFS_LOG) {
2350         xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2351             __func__, ohead->oh_clientid);
2352         ASSERT(0);
2353         return -EFSCORRUPTED;
2354     }
2355 
2356     /*
2357      * Check the ophdr contains all the data it is supposed to contain.
2358      */
2359     len = be32_to_cpu(ohead->oh_len);
2360     if (dp + len > end) {
2361         xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2362         WARN_ON(1);
2363         return -EFSCORRUPTED;
2364     }
2365 
2366     trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2367     if (!trans) {
2368         /* nothing to do, so skip over this ophdr */
2369         return 0;
2370     }
2371 
2372     /*
2373      * The recovered buffer queue is drained only once we know that all
2374      * recovery items for the current LSN have been processed. This is
2375      * required because:
2376      *
2377      * - Buffer write submission updates the metadata LSN of the buffer.
2378      * - Log recovery skips items with a metadata LSN >= the current LSN of
2379      *   the recovery item.
2380      * - Separate recovery items against the same metadata buffer can share
2381      *   a current LSN. I.e., consider that the LSN of a recovery item is
2382      *   defined as the starting LSN of the first record in which its
2383      *   transaction appears, that a record can hold multiple transactions,
2384      *   and/or that a transaction can span multiple records.
2385      *
2386      * In other words, we are allowed to submit a buffer from log recovery
2387      * once per current LSN. Otherwise, we may incorrectly skip recovery
2388      * items and cause corruption.
2389      *
2390      * We don't know up front whether buffers are updated multiple times per
2391      * LSN. Therefore, track the current LSN of each commit log record as it
2392      * is processed and drain the queue when it changes. Use commit records
2393      * because they are ordered correctly by the logging code.
2394      */
2395     if (log->l_recovery_lsn != trans->r_lsn &&
2396         ohead->oh_flags & XLOG_COMMIT_TRANS) {
2397         error = xfs_buf_delwri_submit(buffer_list);
2398         if (error)
2399             return error;
2400         log->l_recovery_lsn = trans->r_lsn;
2401     }
2402 
2403     return xlog_recovery_process_trans(log, trans, dp, len,
2404                        ohead->oh_flags, pass, buffer_list);
2405 }
2406 
2407 /*
2408  * There are two valid states of the r_state field.  0 indicates that the
2409  * transaction structure is in a normal state.  We have either seen the
2410  * start of the transaction or the last operation we added was not a partial
2411  * operation.  If the last operation we added to the transaction was a
2412  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2413  *
2414  * NOTE: skip LRs with 0 data length.
2415  */
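 /*
  * Editor's note: the body of a log record is a packed sequence of
  * (struct xlog_op_header, payload) pairs. The loop below steps dp over
  * each ophdr and then over oh_len bytes of payload until it has consumed
  * h_num_logops operations or reached the end of the record.
  */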
2416 STATIC int
2417 xlog_recover_process_data(
2418     struct xlog     *log,
2419     struct hlist_head   rhash[],
2420     struct xlog_rec_header  *rhead,
2421     char            *dp,
2422     int         pass,
2423     struct list_head    *buffer_list)
2424 {
2425     struct xlog_op_header   *ohead;
2426     char            *end;
2427     int         num_logops;
2428     int         error;
2429 
2430     end = dp + be32_to_cpu(rhead->h_len);
2431     num_logops = be32_to_cpu(rhead->h_num_logops);
2432 
2433     /* check the log format matches our own - else we can't recover */
2434     if (xlog_header_check_recover(log->l_mp, rhead))
2435         return -EIO;
2436 
2437     trace_xfs_log_recover_record(log, rhead, pass);
2438     while ((dp < end) && num_logops) {
2439 
2440         ohead = (struct xlog_op_header *)dp;
2441         dp += sizeof(*ohead);
2442         ASSERT(dp <= end);
2443 
2444         /* errors will abort recovery */
2445         error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2446                            dp, end, pass, buffer_list);
2447         if (error)
2448             return error;
2449 
2450         dp += be32_to_cpu(ohead->oh_len);
2451         num_logops--;
2452     }
2453     return 0;
2454 }
2455 
2456 /* Take all the collected deferred ops and finish them in order. */
2457 static int
2458 xlog_finish_defer_ops(
2459     struct xfs_mount    *mp,
2460     struct list_head    *capture_list)
2461 {
2462     struct xfs_defer_capture *dfc, *next;
2463     struct xfs_trans    *tp;
2464     int         error = 0;
2465 
2466     list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2467         struct xfs_trans_res    resv;
2468         struct xfs_defer_resources dres;
2469 
2470         /*
2471          * Create a new transaction reservation from the captured
2472          * information.  Set logcount to 1 to force the new transaction
2473          * to regrant every roll so that we can make forward progress
2474          * in recovery no matter how full the log might be.
2475          */
2476         resv.tr_logres = dfc->dfc_logres;
2477         resv.tr_logcount = 1;
2478         resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2479 
2480         error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2481                 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2482         if (error) {
2483             xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
2484             return error;
2485         }
2486 
2487         /*
2488          * Transfer to this new transaction all the dfops we captured
2489          * from recovering a single intent item.
2490          */
2491         list_del_init(&dfc->dfc_list);
2492         xfs_defer_ops_continue(dfc, tp, &dres);
2493         error = xfs_trans_commit(tp);
2494         xfs_defer_resources_rele(&dres);
2495         if (error)
2496             return error;
2497     }
2498 
2499     ASSERT(list_empty(capture_list));
2500     return 0;
2501 }
2502 
2503 /* Release all the captured defer ops and capture structures in this list. */
2504 static void
2505 xlog_abort_defer_ops(
2506     struct xfs_mount        *mp,
2507     struct list_head        *capture_list)
2508 {
2509     struct xfs_defer_capture    *dfc;
2510     struct xfs_defer_capture    *next;
2511 
2512     list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2513         list_del_init(&dfc->dfc_list);
2514         xfs_defer_ops_capture_free(mp, dfc);
2515     }
2516 }
2517 
2518 /*
2519  * When this is called, all of the log intent items which did not have
2520  * corresponding log done items should be in the AIL.  What we do now is update
2521  * the data structures associated with each one.
2522  *
2523  * Since we process the log intent items in normal transactions, they will be
2524  * removed at some point after the commit.  This prevents us from just walking
2525  * down the list processing each one.  We'll use a flag in the intent item to
2526  * skip those that we've already processed and use the AIL iteration mechanism's
2527  * generation count to try to speed this up at least a bit.
2528  *
2529  * When we start, we know that the intents are the only things in the AIL. As we
2530  * process them, however, other items are added to the AIL. Hence we know we
2531  * have started recovery on all the pending intents when we find a non-intent
2532  * item in the AIL.
2533  */
2534 STATIC int
2535 xlog_recover_process_intents(
2536     struct xlog     *log)
2537 {
2538     LIST_HEAD(capture_list);
2539     struct xfs_ail_cursor   cur;
2540     struct xfs_log_item *lip;
2541     struct xfs_ail      *ailp;
2542     int         error = 0;
2543 #if defined(DEBUG) || defined(XFS_WARN)
2544     xfs_lsn_t       last_lsn;
2545 #endif
2546 
2547     ailp = log->l_ailp;
2548     spin_lock(&ailp->ail_lock);
2549 #if defined(DEBUG) || defined(XFS_WARN)
2550     last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2551 #endif
2552     for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2553          lip != NULL;
2554          lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
2555         if (!xlog_item_is_intent(lip))
2556             break;
2557 
2558         /*
2559          * We should never see a redo item with a LSN higher than
2560          * the last transaction we found in the log at the start
2561          * of recovery.
2562          */
2563         ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2564 
2565         /*
2566          * NOTE: If your intent processing routine can create more
2567          * deferred ops, you /must/ attach them to the capture list in
2568          * the recover routine or else those subsequent intents will be
2569          * replayed in the wrong order!
2570          */
2571         spin_unlock(&ailp->ail_lock);
2572         error = lip->li_ops->iop_recover(lip, &capture_list);
2573         spin_lock(&ailp->ail_lock);
2574         if (error) {
2575             trace_xlog_intent_recovery_failed(log->l_mp, error,
2576                     lip->li_ops->iop_recover);
2577             break;
2578         }
2579     }
2580 
2581     xfs_trans_ail_cursor_done(&cur);
2582     spin_unlock(&ailp->ail_lock);
2583     if (error)
2584         goto err;
2585 
2586     error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2587     if (error)
2588         goto err;
2589 
2590     return 0;
2591 err:
2592     xlog_abort_defer_ops(log->l_mp, &capture_list);
2593     return error;
2594 }
2595 
2596 /*
2597  * A cancel occurs when the mount has failed and we're bailing out.  Release all
2598  * pending log intent items that we haven't started recovery on so they don't
2599  * pin the AIL.
2600  */
2601 STATIC void
2602 xlog_recover_cancel_intents(
2603     struct xlog     *log)
2604 {
2605     struct xfs_log_item *lip;
2606     struct xfs_ail_cursor   cur;
2607     struct xfs_ail      *ailp;
2608 
2609     ailp = log->l_ailp;
2610     spin_lock(&ailp->ail_lock);
2611     lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2612     while (lip != NULL) {
2613         if (!xlog_item_is_intent(lip))
2614             break;
2615 
2616         spin_unlock(&ailp->ail_lock);
2617         lip->li_ops->iop_release(lip);
2618         spin_lock(&ailp->ail_lock);
2619         lip = xfs_trans_ail_cursor_next(ailp, &cur);
2620     }
2621 
2622     xfs_trans_ail_cursor_done(&cur);
2623     spin_unlock(&ailp->ail_lock);
2624 }
2625 
2626 /*
2627  * This routine performs a transaction to null out a bad inode pointer
2628  * in an agi unlinked inode hash bucket.
2629  */
2630 STATIC void
2631 xlog_recover_clear_agi_bucket(
2632     struct xfs_perag    *pag,
2633     int         bucket)
2634 {
2635     struct xfs_mount    *mp = pag->pag_mount;
2636     struct xfs_trans    *tp;
2637     struct xfs_agi      *agi;
2638     struct xfs_buf      *agibp;
2639     int         offset;
2640     int         error;
2641 
2642     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2643     if (error)
2644         goto out_error;
2645 
2646     error = xfs_read_agi(pag, tp, &agibp);
2647     if (error)
2648         goto out_abort;
2649 
2650     agi = agibp->b_addr;
2651     agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2652     offset = offsetof(xfs_agi_t, agi_unlinked) +
2653          (sizeof(xfs_agino_t) * bucket);
2654     xfs_trans_log_buf(tp, agibp, offset,
2655               (offset + sizeof(xfs_agino_t) - 1));
2656 
2657     error = xfs_trans_commit(tp);
2658     if (error)
2659         goto out_error;
2660     return;
2661 
2662 out_abort:
2663     xfs_trans_cancel(tp);
2664 out_error:
2665     xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
2666             pag->pag_agno);
2667     return;
2668 }
2669 
2670 static int
2671 xlog_recover_iunlink_bucket(
2672     struct xfs_perag    *pag,
2673     struct xfs_agi      *agi,
2674     int         bucket)
2675 {
2676     struct xfs_mount    *mp = pag->pag_mount;
2677     struct xfs_inode    *prev_ip = NULL;
2678     struct xfs_inode    *ip;
2679     xfs_agino_t     prev_agino, agino;
2680     int         error = 0;
2681 
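         /*
          * Editor's note: each AGI bucket heads a singly linked list of
          * unlinked inodes chained through the on-disk next-unlinked
          * pointers. The walk below follows that chain and backfills each
          * in-memory inode's i_prev_unlinked, building the in-memory list
          * referred to in the comment further down.
          */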
2682     agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2683     while (agino != NULLAGINO) {
2684         error = xfs_iget(mp, NULL,
2685                 XFS_AGINO_TO_INO(mp, pag->pag_agno, agino),
2686                 0, 0, &ip);
2687         if (error)
2688             break;
2689 
2690         ASSERT(VFS_I(ip)->i_nlink == 0);
2691         ASSERT(VFS_I(ip)->i_mode != 0);
2692         xfs_iflags_clear(ip, XFS_IRECOVERY);
2693         agino = ip->i_next_unlinked;
2694 
2695         if (prev_ip) {
2696             ip->i_prev_unlinked = prev_agino;
2697             xfs_irele(prev_ip);
2698 
2699             /*
2700              * Ensure the inode is removed from the unlinked list
2701              * before we continue so that it won't race with
2702              * building the in-memory list here. This could be
2703              * serialised with the agibp lock, but that just
2704              * serialises via lockstepping and it's much simpler
2705              * just to flush the inodegc queue and wait for it to
2706              * complete.
2707              */
2708             xfs_inodegc_flush(mp);
2709         }
2710 
2711         prev_agino = agino;
2712         prev_ip = ip;
2713     }
2714 
2715     if (prev_ip) {
2716         ip->i_prev_unlinked = prev_agino;
2717         xfs_irele(prev_ip);
2718     }
2719     xfs_inodegc_flush(mp);
2720     return error;
2721 }
2722 
2723 /*
2724  * Recover AGI unlinked lists
2725  *
2726  * This is called during recovery to process any inodes which we unlinked but
2727  * had not freed when the system crashed.  These inodes will be on the lists in the
2728  * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2729  * any inodes found on the lists. Each inode is removed from the lists when it
2730  * has been fully truncated and is freed. The freeing of the inode and its
2731  * removal from the list must be atomic.
2732  *
2733  * If everything we touch in the agi processing loop is already in memory, this
2734  * loop can hold the cpu for a long time. It runs without lock contention,
2735  * memory allocation contention, the need to wait for IO, etc, and so will run
2736  * until we either run out of inodes to process, run low on memory, or run out
2737  * of log space.
2738  *
2739  * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2740  * and can prevent other filesystem work (such as CIL pushes) from running. This
2741  * can lead to deadlocks if the recovery process runs out of log reservation
2742  * space. Hence we need to yield the CPU when there is other kernel work
2743  * scheduled on this CPU to ensure other scheduled work can run without undue
2744  * latency.
2745  */
2746 static void
2747 xlog_recover_iunlink_ag(
2748     struct xfs_perag    *pag)
2749 {
2750     struct xfs_agi      *agi;
2751     struct xfs_buf      *agibp;
2752     int         bucket;
2753     int         error;
2754 
2755     error = xfs_read_agi(pag, NULL, &agibp);
2756     if (error) {
2757         /*
2758          * AGI is b0rked. Don't process it.
2759          *
2760          * We should probably mark the filesystem as corrupt after we've
2761          * recovered all the AGs we can....
2762          */
2763         return;
2764     }
2765 
2766     /*
2767      * Unlock the buffer so that it can be acquired in the normal course of
2768      * the transaction to truncate and free each inode.  Because we are not
2769      * racing with anyone else here for the AGI buffer, we don't even need
2770      * to hold it locked to read the initial unlinked bucket entries out of
2771      * the buffer. We keep a buffer reference, though, so that it stays pinned
2772      * in memory while we need the buffer.
2773      */
2774     agi = agibp->b_addr;
2775     xfs_buf_unlock(agibp);
2776 
2777     for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2778         error = xlog_recover_iunlink_bucket(pag, agi, bucket);
2779         if (error) {
2780             /*
2781              * Bucket is unrecoverable, so only a repair scan can
2782              * free the remaining unlinked inodes. Just empty the
2783              * bucket and leave the remaining inodes on it
2784              * unreferenced and unfreeable.
2785              */
2786             xfs_inodegc_flush(pag->pag_mount);
2787             xlog_recover_clear_agi_bucket(pag, bucket);
2788         }
2789     }
2790 
2791     xfs_buf_rele(agibp);
2792 }
2793 
2794 static void
2795 xlog_recover_process_iunlinks(
2796     struct xlog *log)
2797 {
2798     struct xfs_perag    *pag;
2799     xfs_agnumber_t      agno;
2800 
2801     for_each_perag(log->l_mp, agno, pag)
2802         xlog_recover_iunlink_ag(pag);
2803 
2804     /*
2805      * Flush the pending unlinked inodes to ensure that the inactivations
2806      * are fully completed on disk and the incore inodes can be reclaimed
2807      * before we signal that recovery is complete.
2808      */
2809     xfs_inodegc_flush(log->l_mp);
2810 }
2811 
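 /*
  * Editor's note: when a record was written, the first four bytes of every
  * basic block in its body were replaced with the record's cycle number so
  * that torn writes can be detected, and the displaced words were stashed
  * in the header's h_cycle_data[] array (and, for v2 logs, in the extended
  * headers that follow it). This routine reverses that: with a 32k
  * XLOG_HEADER_CYCLE_SIZE and 512-byte basic blocks there are 64 slots per
  * header, so the first loop below restores blocks 0-63 from
  * h_cycle_data[] and the v2 loop restores block i from extended header
  * i / 64, slot i % 64.
  */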
2812 STATIC void
2813 xlog_unpack_data(
2814     struct xlog_rec_header  *rhead,
2815     char            *dp,
2816     struct xlog     *log)
2817 {
2818     int         i, j, k;
2819 
2820     for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2821           i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2822         *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2823         dp += BBSIZE;
2824     }
2825 
2826     if (xfs_has_logv2(log->l_mp)) {
2827         xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2828         for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2829             j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2830             k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2831             *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2832             dp += BBSIZE;
2833         }
2834     }
2835 }
2836 
2837 /*
2838  * CRC check, unpack and process a log record.
2839  */
2840 STATIC int
2841 xlog_recover_process(
2842     struct xlog     *log,
2843     struct hlist_head   rhash[],
2844     struct xlog_rec_header  *rhead,
2845     char            *dp,
2846     int         pass,
2847     struct list_head    *buffer_list)
2848 {
2849     __le32          old_crc = rhead->h_crc;
2850     __le32          crc;
2851 
2852     crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2853 
2854     /*
2855      * Nothing else to do if this is a CRC verification pass. Just return
2856      * if this is a record with a non-zero crc. Unfortunately, mkfs always
2857      * sets old_crc to 0 so we must consider this valid even on v5 supers.
2858      * Otherwise, return EFSBADCRC on failure so the callers up the stack
2859      * know precisely what failed.
2860      */
2861     if (pass == XLOG_RECOVER_CRCPASS) {
2862         if (old_crc && crc != old_crc)
2863             return -EFSBADCRC;
2864         return 0;
2865     }
2866 
2867     /*
2868      * We're in the normal recovery path. Issue a warning if and only if the
2869      * CRC in the header is non-zero. This is an advisory warning and the
2870      * zero CRC check prevents warnings from being emitted when upgrading
2871      * the kernel from one that does not add CRCs by default.
2872      */
2873     if (crc != old_crc) {
2874         if (old_crc || xfs_has_crc(log->l_mp)) {
2875             xfs_alert(log->l_mp,
2876         "log record CRC mismatch: found 0x%x, expected 0x%x.",
2877                     le32_to_cpu(old_crc),
2878                     le32_to_cpu(crc));
2879             xfs_hex_dump(dp, 32);
2880         }
2881 
2882         /*
2883          * If the filesystem is CRC enabled, this mismatch becomes a
2884          * fatal log corruption failure.
2885          */
2886         if (xfs_has_crc(log->l_mp)) {
2887             XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2888             return -EFSCORRUPTED;
2889         }
2890     }
2891 
2892     xlog_unpack_data(rhead, dp, log);
2893 
2894     return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2895                      buffer_list);
2896 }
2897 
2898 STATIC int
2899 xlog_valid_rec_header(
2900     struct xlog     *log,
2901     struct xlog_rec_header  *rhead,
2902     xfs_daddr_t     blkno,
2903     int         bufsize)
2904 {
2905     int         hlen;
2906 
2907     if (XFS_IS_CORRUPT(log->l_mp,
2908                rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2909         return -EFSCORRUPTED;
2910     if (XFS_IS_CORRUPT(log->l_mp,
2911                (!rhead->h_version ||
2912                (be32_to_cpu(rhead->h_version) &
2913                 (~XLOG_VERSION_OKBITS))))) {
2914         xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2915             __func__, be32_to_cpu(rhead->h_version));
2916         return -EFSCORRUPTED;
2917     }
2918 
2919     /*
2920      * LR body must have data (or it wouldn't have been written)
2921      * and h_len must not be greater than the LR buffer size.
2922      */
2923     hlen = be32_to_cpu(rhead->h_len);
2924     if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2925         return -EFSCORRUPTED;
2926 
2927     if (XFS_IS_CORRUPT(log->l_mp,
2928                blkno > log->l_logBBsize || blkno > INT_MAX))
2929         return -EFSCORRUPTED;
2930     return 0;
2931 }
2932 
2933 /*
2934  * Read the log from tail to head and process the log records found.
2935  * Handle the two cases where the tail and head are in the same cycle
2936  * and where the active portion of the log wraps around the end of
2937  * the physical log separately.  The pass parameter is passed through
2938  * to the routines called to process the data and is not looked at
2939  * here.
2940  */
2941 STATIC int
2942 xlog_do_recovery_pass(
2943     struct xlog     *log,
2944     xfs_daddr_t     head_blk,
2945     xfs_daddr_t     tail_blk,
2946     int         pass,
2947     xfs_daddr_t     *first_bad) /* out: first bad log rec */
2948 {
2949     xlog_rec_header_t   *rhead;
2950     xfs_daddr_t     blk_no, rblk_no;
2951     xfs_daddr_t     rhead_blk;
2952     char            *offset;
2953     char            *hbp, *dbp;
2954     int         error = 0, h_size, h_len;
2955     int         error2 = 0;
2956     int         bblks, split_bblks;
2957     int         hblks, split_hblks, wrapped_hblks;
2958     int         i;
2959     struct hlist_head   rhash[XLOG_RHASH_SIZE];
2960     LIST_HEAD       (buffer_list);
2961 
2962     ASSERT(head_blk != tail_blk);
2963     blk_no = rhead_blk = tail_blk;
2964 
2965     for (i = 0; i < XLOG_RHASH_SIZE; i++)
2966         INIT_HLIST_HEAD(&rhash[i]);
2967 
2968     /*
2969      * Read the header of the tail block and get the iclog buffer size from
2970      * h_size.  Use this to tell how many sectors make up the log header.
2971      */
2972     if (xfs_has_logv2(log->l_mp)) {
2973         /*
2974          * When using variable length iclogs, read first sector of
2975          * iclog header and extract the header size from it.  Get a
2976          * new hbp that is the correct size.
2977          */
2978         hbp = xlog_alloc_buffer(log, 1);
2979         if (!hbp)
2980             return -ENOMEM;
2981 
2982         error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2983         if (error)
2984             goto bread_err1;
2985 
2986         rhead = (xlog_rec_header_t *)offset;
2987 
2988         /*
2989          * xfsprogs has a bug where record length is based on lsunit but
2990          * h_size (iclog size) is hardcoded to 32k. Now that we
2991          * unconditionally CRC verify the unmount record, this means the
2992          * log buffer can be too small for the record and cause an
2993          * overrun.
2994          *
2995          * Detect this condition here. Use lsunit for the buffer size as
2996          * long as this looks like the mkfs case. Otherwise, return an
2997          * error to avoid a buffer overrun.
2998          */
2999         h_size = be32_to_cpu(rhead->h_size);
3000         h_len = be32_to_cpu(rhead->h_len);
3001         if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3002             rhead->h_num_logops == cpu_to_be32(1)) {
3003             xfs_warn(log->l_mp,
3004         "invalid iclog size (%d bytes), using lsunit (%d bytes)",
3005                  h_size, log->l_mp->m_logbsize);
3006             h_size = log->l_mp->m_logbsize;
3007         }
3008 
3009         error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3010         if (error)
3011             goto bread_err1;
3012 
3013         hblks = xlog_logrec_hblks(log, rhead);
3014         if (hblks != 1) {
3015             kmem_free(hbp);
3016             hbp = xlog_alloc_buffer(log, hblks);
3017         }
3018     } else {
3019         ASSERT(log->l_sectBBsize == 1);
3020         hblks = 1;
3021         hbp = xlog_alloc_buffer(log, 1);
3022         h_size = XLOG_BIG_RECORD_BSIZE;
3023     }
3024 
3025     if (!hbp)
3026         return -ENOMEM;
3027     dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3028     if (!dbp) {
3029         kmem_free(hbp);
3030         return -ENOMEM;
3031     }
3032 
3033     memset(rhash, 0, sizeof(rhash));
3034     if (tail_blk > head_blk) {
3035         /*
3036          * Perform recovery around the end of the physical log.
3037          * When the head is not on the same cycle number as the tail,
3038          * we can't do a sequential recovery.
3039          */
3040         while (blk_no < log->l_logBBsize) {
3041             /*
3042              * Check for header wrapping around physical end-of-log
3043              */
3044             offset = hbp;
3045             split_hblks = 0;
3046             wrapped_hblks = 0;
3047             if (blk_no + hblks <= log->l_logBBsize) {
3048                 /* Read header in one read */
3049                 error = xlog_bread(log, blk_no, hblks, hbp,
3050                            &offset);
3051                 if (error)
3052                     goto bread_err2;
3053             } else {
3054                 /* This LR is split across physical log end */
3055                 if (blk_no != log->l_logBBsize) {
3056                     /* some data before physical log end */
3057                     ASSERT(blk_no <= INT_MAX);
3058                     split_hblks = log->l_logBBsize - (int)blk_no;
3059                     ASSERT(split_hblks > 0);
3060                     error = xlog_bread(log, blk_no,
3061                                split_hblks, hbp,
3062                                &offset);
3063                     if (error)
3064                         goto bread_err2;
3065                 }
3066 
3067                 /*
3068                  * Note: this black magic still works with
3069                  * large sector sizes (non-512) only because:
3070                  * - we increased the buffer size originally
3071                  *   by 1 sector giving us enough extra space
3072                  *   for the second read;
3073                  * - the log start is guaranteed to be sector
3074                  *   aligned;
3075                  * - we read the log end (LR header start)
3076                  *   _first_, then the log start (LR header end)
3077                  *   - order is important.
3078                  */
3079                 wrapped_hblks = hblks - split_hblks;
3080                 error = xlog_bread_noalign(log, 0,
3081                         wrapped_hblks,
3082                         offset + BBTOB(split_hblks));
3083                 if (error)
3084                     goto bread_err2;
3085             }
3086             rhead = (xlog_rec_header_t *)offset;
3087             error = xlog_valid_rec_header(log, rhead,
3088                     split_hblks ? blk_no : 0, h_size);
3089             if (error)
3090                 goto bread_err2;
3091 
3092             bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3093             blk_no += hblks;
3094 
3095             /*
3096              * Read the log record data in multiple reads if it
3097              * wraps around the end of the log. Note that if the
3098              * header already wrapped, blk_no could point past the
3099              * end of the log. The record data is contiguous in
3100              * that case.
3101              */
3102             if (blk_no + bblks <= log->l_logBBsize ||
3103                 blk_no >= log->l_logBBsize) {
3104                 rblk_no = xlog_wrap_logbno(log, blk_no);
3105                 error = xlog_bread(log, rblk_no, bblks, dbp,
3106                            &offset);
3107                 if (error)
3108                     goto bread_err2;
3109             } else {
3110                 /* This log record is split across the
3111                  * physical end of log */
3112                 offset = dbp;
3113                 split_bblks = 0;
3114                 if (blk_no != log->l_logBBsize) {
3115                     /* some data is before the physical
3116                      * end of log */
3117                     ASSERT(!wrapped_hblks);
3118                     ASSERT(blk_no <= INT_MAX);
3119                     split_bblks =
3120                         log->l_logBBsize - (int)blk_no;
3121                     ASSERT(split_bblks > 0);
3122                     error = xlog_bread(log, blk_no,
3123                             split_bblks, dbp,
3124                             &offset);
3125                     if (error)
3126                         goto bread_err2;
3127                 }
3128 
3129                 /*
3130                  * Note: this black magic still works with
3131                  * large sector sizes (non-512) only because:
3132                  * - we increased the buffer size originally
3133                  *   by 1 sector giving us enough extra space
3134                  *   for the second read;
3135                  * - the log start is guaranteed to be sector
3136                  *   aligned;
3137                  * - we read the log end (LR header start)
3138                  *   _first_, then the log start (LR header end)
3139                  *   - order is important.
3140                  */
3141                 error = xlog_bread_noalign(log, 0,
3142                         bblks - split_bblks,
3143                         offset + BBTOB(split_bblks));
3144                 if (error)
3145                     goto bread_err2;
3146             }
3147 
3148             error = xlog_recover_process(log, rhash, rhead, offset,
3149                              pass, &buffer_list);
3150             if (error)
3151                 goto bread_err2;
3152 
3153             blk_no += bblks;
3154             rhead_blk = blk_no;
3155         }
3156 
3157         ASSERT(blk_no >= log->l_logBBsize);
3158         blk_no -= log->l_logBBsize;
3159         rhead_blk = blk_no;
3160     }
3161 
3162     /* read first part of physical log */
3163     while (blk_no < head_blk) {
3164         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3165         if (error)
3166             goto bread_err2;
3167 
3168         rhead = (xlog_rec_header_t *)offset;
3169         error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3170         if (error)
3171             goto bread_err2;
3172 
3173         /* blocks in data section */
3174         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3175         error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3176                    &offset);
3177         if (error)
3178             goto bread_err2;
3179 
3180         error = xlog_recover_process(log, rhash, rhead, offset, pass,
3181                          &buffer_list);
3182         if (error)
3183             goto bread_err2;
3184 
3185         blk_no += bblks + hblks;
3186         rhead_blk = blk_no;
3187     }
3188 
3189  bread_err2:
3190     kmem_free(dbp);
3191  bread_err1:
3192     kmem_free(hbp);
3193 
3194     /*
3195      * Submit buffers that have been added from the last record processed,
3196      * regardless of error status.
3197      */
3198     if (!list_empty(&buffer_list))
3199         error2 = xfs_buf_delwri_submit(&buffer_list);
3200 
3201     if (error && first_bad)
3202         *first_bad = rhead_blk;
3203 
3204     /*
3205      * Transactions are freed at commit time but transactions without commit
3206      * records on disk are never committed. Free any that may be left in the
3207      * hash table.
3208      */
3209     for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3210         struct hlist_node   *tmp;
3211         struct xlog_recover *trans;
3212 
3213         hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3214             xlog_recover_free_trans(trans);
3215     }
3216 
3217     return error ? error : error2;
3218 }
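/*
 * A minimal user-space sketch of the wrap-around reads performed above: when
 * a record header or body straddles the physical end of the log, the part
 * before the end is read first, then the wrapped remainder is appended into
 * the same buffer, mirroring split_hblks/wrapped_hblks and xlog_wrap_logbno().
 * All demo_* names below are hypothetical stand-ins, not kernel APIs.
 */
#include <string.h>

#define DEMO_BBSIZE	512	/* basic block size, as in XFS */

/* hypothetical stand-in for xlog_bread(): copy bbcount blocks at blk_no */
static void
demo_bread(const char *log, int blk_no, int bbcount, char *buf)
{
	memcpy(buf, log + (size_t)blk_no * DEMO_BBSIZE,
	       (size_t)bbcount * DEMO_BBSIZE);
}

/*
 * Read bbcount blocks starting at blk_no from a log of logbbsize blocks,
 * wrapping past the physical end the same way the recovery pass does.
 */
static void
demo_read_wrapped(const char *log, int logbbsize, int blk_no, int bbcount,
		  char *buf)
{
	if (blk_no + bbcount <= logbbsize) {
		demo_bread(log, blk_no, bbcount, buf);	/* no wrap */
	} else if (blk_no < logbbsize) {
		int split = logbbsize - blk_no;		/* cf. split_bblks */

		demo_bread(log, blk_no, split, buf);
		demo_bread(log, 0, bbcount - split,	/* wrapped remainder */
			   buf + (size_t)split * DEMO_BBSIZE);
	} else {
		/* caller already wrapped: contiguous, cf. xlog_wrap_logbno() */
		demo_bread(log, blk_no % logbbsize, bbcount, buf);
	}
}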
3219 
3220 /*
3221  * Do the recovery of the log.  We actually do this in two passes.
3222  * The two passes are necessary in order to implement the function
3223  * of cancelling a record written into the log.  The first pass
3224  * determines those things which have been cancelled, and the
3225  * second pass replays log items normally except for those which
3226  * have been cancelled.  The handling of the replay and cancellations
3227  * takes place in the log item type specific routines.
3228  *
3229  * The table of items which have cancel records in the log is allocated
3230  * and freed at this level, since only here do we know when all of
3231  * the log recovery has been completed.
3232  */
3233 STATIC int
3234 xlog_do_log_recovery(
3235     struct xlog *log,
3236     xfs_daddr_t head_blk,
3237     xfs_daddr_t tail_blk)
3238 {
3239     int     error;
3240 
3241     ASSERT(head_blk != tail_blk);
3242 
3243     /*
3244      * First do a pass to find all of the cancelled buf log items.
3245      * Store them in the buf_cancel_table for use in the second pass.
3246      */
3247     error = xlog_alloc_buf_cancel_table(log);
3248     if (error)
3249         return error;
3250 
3251     error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3252                       XLOG_RECOVER_PASS1, NULL);
3253     if (error != 0)
3254         goto out_cancel;
3255 
3256     /*
3257      * Then do a second pass to actually recover the items in the log.
3258      * When it is complete free the table of buf cancel items.
3259      */
3260     error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3261                       XLOG_RECOVER_PASS2, NULL);
3262     if (!error)
3263         xlog_check_buf_cancel_table(log);
3264 out_cancel:
3265     xlog_free_buf_cancel_table(log);
3266     return error;
3267 }
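/*
 * A minimal sketch of the two-pass scheme implemented above, with invented
 * demo_* names: pass 1 only records which items were cancelled, pass 2
 * replays everything that is not in the cancel table.  The real code keys
 * its table by buffer address and tracks reference counts.
 */
#include <stdbool.h>
#include <stddef.h>

#define DEMO_TABLE_SIZE	64

struct demo_item {
	unsigned long	id;
	bool		cancelled;	/* item carries a cancel record */
};

static unsigned long	demo_cancel_table[DEMO_TABLE_SIZE];
static size_t		demo_cancel_count;

static bool
demo_is_cancelled(unsigned long id)
{
	for (size_t i = 0; i < demo_cancel_count; i++)
		if (demo_cancel_table[i] == id)
			return true;
	return false;
}

static void
demo_recover(const struct demo_item *items, size_t nitems,
	     void (*replay)(const struct demo_item *))
{
	/* pass 1: collect cancelled item ids, replay nothing yet */
	for (size_t i = 0; i < nitems; i++)
		if (items[i].cancelled && demo_cancel_count < DEMO_TABLE_SIZE)
			demo_cancel_table[demo_cancel_count++] = items[i].id;

	/* pass 2: replay every item that was not cancelled */
	for (size_t i = 0; i < nitems; i++)
		if (!items[i].cancelled && !demo_is_cancelled(items[i].id))
			replay(&items[i]);

	demo_cancel_count = 0;		/* free the table, cf. out_cancel */
}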
3268 
3269 /*
3270  * Do the actual recovery
3271  */
3272 STATIC int
3273 xlog_do_recover(
3274     struct xlog     *log,
3275     xfs_daddr_t     head_blk,
3276     xfs_daddr_t     tail_blk)
3277 {
3278     struct xfs_mount    *mp = log->l_mp;
3279     struct xfs_buf      *bp = mp->m_sb_bp;
3280     struct xfs_sb       *sbp = &mp->m_sb;
3281     int         error;
3282 
3283     trace_xfs_log_recover(log, head_blk, tail_blk);
3284 
3285     /*
3286      * First replay the images in the log.
3287      */
3288     error = xlog_do_log_recovery(log, head_blk, tail_blk);
3289     if (error)
3290         return error;
3291 
3292     if (xlog_is_shutdown(log))
3293         return -EIO;
3294 
3295     /*
3296      * We now update the tail_lsn since much of the recovery has completed
3297      * and there may be space available to use.  If there were no extent
3298      * frees or iunlinks, we can free up the entire log and set the tail_lsn
3299      * to be the last_sync_lsn.  This was set in xlog_find_tail to be the
3300      * lsn of the last known good LR on disk.  If there are extent frees
3301      * or iunlinks they will have some entries in the AIL; so we look at
3302      * the AIL to determine how to set the tail_lsn.
3303      */
3304     xlog_assign_tail_lsn(mp);
3305 
3306     /*
3307      * Now that we've finished replaying all buffer and inode updates,
3308      * re-read the superblock and reverify it.
3309      */
3310     xfs_buf_lock(bp);
3311     xfs_buf_hold(bp);
3312     error = _xfs_buf_read(bp, XBF_READ);
3313     if (error) {
3314         if (!xlog_is_shutdown(log)) {
3315             xfs_buf_ioerror_alert(bp, __this_address);
3316             ASSERT(0);
3317         }
3318         xfs_buf_relse(bp);
3319         return error;
3320     }
3321 
3322     /* Convert superblock from on-disk format */
3323     xfs_sb_from_disk(sbp, bp->b_addr);
3324     xfs_buf_relse(bp);
3325 
3326     /* re-initialise in-core superblock and geometry structures */
3327     mp->m_features |= xfs_sb_version_to_features(sbp);
3328     xfs_reinit_percpu_counters(mp);
3329     error = xfs_initialize_perag(mp, sbp->sb_agcount, sbp->sb_dblocks,
3330             &mp->m_maxagi);
3331     if (error) {
3332         xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3333         return error;
3334     }
3335     mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3336 
3337     /* Normal transactions can now occur */
3338     clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3339     return 0;
3340 }
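/*
 * xfs_sb_from_disk() above converts the freshly reread superblock from its
 * on-disk big-endian layout into the CPU-endian in-core structure before any
 * derived state is rebuilt.  A tiny sketch of that conversion pattern, using
 * invented demo_* structures (the real superblock has many more fields):
 */
#include <stdint.h>
#include <arpa/inet.h>		/* ntohl(): big-endian (on-disk) -> host */

struct demo_dsb {		/* on-disk layout, fields big-endian */
	uint32_t	sb_agcount;
	uint32_t	sb_blocksize;
};

struct demo_sb {		/* in-core copy, host byte order */
	uint32_t	sb_agcount;
	uint32_t	sb_blocksize;
};

static void
demo_sb_from_disk(struct demo_sb *to, const struct demo_dsb *from)
{
	to->sb_agcount = ntohl(from->sb_agcount);
	to->sb_blocksize = ntohl(from->sb_blocksize);
}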
3341 
3342 /*
3343  * Perform recovery; xlog_find_tail also re-initializes some log variables.
3344  *
3345  * Return error or zero.
3346  */
3347 int
3348 xlog_recover(
3349     struct xlog *log)
3350 {
3351     xfs_daddr_t head_blk, tail_blk;
3352     int     error;
3353 
3354     /* find the tail of the log */
3355     error = xlog_find_tail(log, &head_blk, &tail_blk);
3356     if (error)
3357         return error;
3358 
3359     /*
3360      * The superblock was read before the log was available and thus the LSN
3361      * could not be verified. Check the superblock LSN against the current
3362      * LSN now that it's known.
3363      */
3364     if (xfs_has_crc(log->l_mp) &&
3365         !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3366         return -EINVAL;
3367 
3368     if (tail_blk != head_blk) {
3369         /* There used to be a comment here:
3370          *
3371          * disallow recovery on read-only mounts.  note -- mount
3372          * checks for ENOSPC and turns it into an intelligent
3373          * error message.
3374          * ...but this is no longer true.  Now, unless you specify
3375          * NORECOVERY (in which case this function would never be
3376          * called), we just go ahead and recover.  We do this all
3377          * under the vfs layer, so we can get away with it unless
3378          * the device itself is read-only, in which case we fail.
3379          */
3380         if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3381             return error;
3382         }
3383 
3384         /*
3385          * Version 5 superblock log feature mask validation. We know the
3386          * log is dirty so check if there are any unknown log features
3387          * in what we need to recover. If there are unknown features
3388          * (e.g. unsupported transactions), then simply reject the
3389          * attempt at recovery before touching anything.
3390          */
3391         if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3392             xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3393                     XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3394             xfs_warn(log->l_mp,
3395 "Superblock has unknown incompatible log features (0x%x) enabled.",
3396                 (log->l_mp->m_sb.sb_features_log_incompat &
3397                     XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3398             xfs_warn(log->l_mp,
3399 "The log can not be fully and/or safely recovered by this kernel.");
3400             xfs_warn(log->l_mp,
3401 "Please recover the log on a kernel that supports the unknown features.");
3402             return -EINVAL;
3403         }
3404 
3405         /*
3406          * Delay log recovery if the debug hook is set. This is debug
3407          * instrumentation to coordinate simulation of I/O failures with
3408          * log recovery.
3409          */
3410         if (xfs_globals.log_recovery_delay) {
3411             xfs_notice(log->l_mp,
3412                 "Delaying log recovery for %d seconds.",
3413                 xfs_globals.log_recovery_delay);
3414             msleep(xfs_globals.log_recovery_delay * 1000);
3415         }
3416 
3417         xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3418                 log->l_mp->m_logname ? log->l_mp->m_logname
3419                              : "internal");
3420 
3421         error = xlog_do_recover(log, head_blk, tail_blk);
3422         set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3423     }
3424     return error;
3425 }
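/*
 * A small sketch of the feature-mask rejection above, with invented demo_*
 * names: any log-incompat bit this code does not recognise means the dirty
 * log may contain records it cannot replay safely, so recovery is refused
 * before anything is touched.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOG_INCOMPAT_FOO		(1u << 0)	/* a bit we understand */
#define DEMO_LOG_INCOMPAT_KNOWN		DEMO_LOG_INCOMPAT_FOO
#define DEMO_LOG_INCOMPAT_UNKNOWN	(~DEMO_LOG_INCOMPAT_KNOWN)

static bool
demo_can_recover(uint32_t sb_features_log_incompat)
{
	uint32_t unknown = sb_features_log_incompat & DEMO_LOG_INCOMPAT_UNKNOWN;

	if (unknown) {
		fprintf(stderr,
			"unknown incompatible log features (0x%x) enabled\n",
			unknown);
		return false;
	}
	return true;
}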
3426 
3427 /*
3428  * In the first part of recovery we replay inodes and buffers and build up the
3429  * list of intents which need to be processed. Here we process the intents and
3430  * clean up the on disk unlinked inode lists. This is separated from the first
3431  * part of recovery so that the root and real-time bitmap inodes can be read in
3432  * from disk in between the two stages.  This is necessary so that we can free
3433  * space in the real-time portion of the file system.
3434  */
3435 int
3436 xlog_recover_finish(
3437     struct xlog *log)
3438 {
3439     int error;
3440 
3441     error = xlog_recover_process_intents(log);
3442     if (error) {
3443         /*
3444          * Cancel all the unprocessed intent items now so that we don't
3445          * leave them pinned in the AIL.  This can cause the AIL to
3446          * livelock on the pinned item if anyone tries to push the AIL
3447          * (inode reclaim does this) before we get around to
3448          * xfs_log_mount_cancel.
3449          */
3450         xlog_recover_cancel_intents(log);
3451         xfs_alert(log->l_mp, "Failed to recover intents");
3452         xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3453         return error;
3454     }
3455 
3456     /*
3457      * Sync the log to get all the intents out of the AIL.  This isn't
3458      * absolutely necessary, but it helps in case the unlink transactions
3459      * would have problems pushing the intents out of the way.
3460      */
3461     xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3462 
3463     /*
3464      * Now that we've recovered the log and all the intents, we can clear
3465      * the log incompat feature bits in the superblock because there's no
3466      * longer anything to protect.  We rely on the AIL push to write out the
3467      * updated superblock after everything else.
3468      */
3469     if (xfs_clear_incompat_log_features(log->l_mp)) {
3470         error = xfs_sync_sb(log->l_mp, false);
3471         if (error < 0) {
3472             xfs_alert(log->l_mp,
3473     "Failed to clear log incompat features on recovery");
3474             return error;
3475         }
3476     }
3477 
3478     xlog_recover_process_iunlinks(log);
3479 
3480     /*
3481      * Recover any CoW staging blocks that are still referenced by the
3482      * ondisk refcount metadata.  During mount there cannot be any live
3483      * staging extents as we have not permitted any user modifications.
3484      * Therefore, it is safe to free them all right now, even on a
3485      * read-only mount.
3486      */
3487     error = xfs_reflink_recover_cow(log->l_mp);
3488     if (error) {
3489         xfs_alert(log->l_mp,
3490     "Failed to recover leftover CoW staging extents, err %d.",
3491                 error);
3492         /*
3493          * If we get an error here, make sure the log is shut down
3494          * but return zero so that any log items committed since the
3495          * end of intents processing can be pushed through the CIL
3496          * and AIL.
3497          */
3498         xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3499     }
3500 
3501     return 0;
3502 }
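/*
 * A sketch of the error policy in xlog_recover_finish() above, with invented
 * demo_* names: a failure while processing intents is fatal and propagated,
 * but a failure in the later CoW cleanup only shuts the log down and still
 * returns zero, so items committed after intent processing can be pushed
 * through the CIL and AIL.
 */
struct demo_log;
int demo_process_intents(struct demo_log *log);
void demo_cancel_intents(struct demo_log *log);
int demo_recover_cow(struct demo_log *log);
void demo_shutdown(struct demo_log *log);

static int
demo_finish(struct demo_log *log)
{
	int error = demo_process_intents(log);

	if (error) {
		demo_cancel_intents(log);	/* avoid pinned AIL items */
		demo_shutdown(log);
		return error;			/* hard failure */
	}

	if (demo_recover_cow(log))
		demo_shutdown(log);		/* shut down, but ... */
	return 0;				/* ... report success */
}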
3503 
3504 void
3505 xlog_recover_cancel(
3506     struct xlog *log)
3507 {
3508     if (xlog_recovery_needed(log))
3509         xlog_recover_cancel_intents(log);
3510 }
3511