Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Copyright (C) 2016 Oracle.  All Rights Reserved.
0004  * Author: Darrick J. Wong <darrick.wong@oracle.com>
0005  */
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_format.h"
0009 #include "xfs_log_format.h"
0010 #include "xfs_trans_resv.h"
0011 #include "xfs_bit.h"
0012 #include "xfs_shared.h"
0013 #include "xfs_mount.h"
0014 #include "xfs_defer.h"
0015 #include "xfs_trans.h"
0016 #include "xfs_trans_priv.h"
0017 #include "xfs_rmap_item.h"
0018 #include "xfs_log.h"
0019 #include "xfs_rmap.h"
0020 #include "xfs_error.h"
0021 #include "xfs_log_priv.h"
0022 #include "xfs_log_recover.h"
0023 
/* Slab caches for the RUI (intent) and RUD (done) log items. */
struct kmem_cache	*xfs_rui_cache;
struct kmem_cache	*xfs_rud_cache;

/* Forward declaration; the ops table is defined at the bottom of this file. */
static const struct xfs_item_ops xfs_rui_item_ops;
0028 
/* Convert a generic log item back to the RUI that embeds it. */
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}
0033 
/*
 * Free an RUI and any associated log vector shadow buffer.  The item must
 * match the allocation strategy used by xfs_rui_init(): oversized items
 * came from the heap, everything else from the slab cache.
 */
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	kmem_free(ruip->rui_item.li_lv_shadow);
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_cache, ruip);
}
0044 
/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (!atomic_dec_and_test(&ruip->rui_refcount))
		return;

	/*
	 * Last reference dropped: pull the item out of the AIL (shutdown
	 * type 0, i.e. no forced shutdown) and free it.
	 */
	xfs_trans_ail_delete(&ruip->rui_item, 0);
	xfs_rui_item_free(ruip);
}
0063 
/*
 * Report the number of iovecs and the log space needed to log the given
 * RUI item: one iovec holding the variable-length format structure
 * (header plus one map_extent per logged rmap update).
 */
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}
0075 
/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every reserved extent slot must have been populated by now. */
	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	/* Copy the whole format structure (header + extents) into the lv. */
	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}
0100 
/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	/* @remove is part of the iop_unpin contract but unused here. */
	xfs_rui_release(ruip);
}
0118 
0119 /*
0120  * The RUI has been either committed or aborted if the transaction has been
0121  * cancelled. If the transaction was cancelled, an RUD isn't going to be
0122  * constructed and thus we free the RUI here directly.
0123  */
0124 STATIC void
0125 xfs_rui_item_release(
0126     struct xfs_log_item *lip)
0127 {
0128     xfs_rui_release(RUI_ITEM(lip));
0129 }
0130 
/*
 * Allocate and initialize an rui item with the given number of extents.
 *
 * Items small enough to fit XFS_RUI_MAX_FAST_EXTENTS come from the slab
 * cache; larger ones are heap-allocated to hold the bigger flex array.
 * The item starts with a reference count of 2: one for the creating
 * transaction and one for the AIL insertion done at commit/unpin time.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)

{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	/* The item's own address doubles as the unique intent id. */
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}
0157 
/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 *
 * Returns 0 on success or -EFSCORRUPTED if the recovered buffer length
 * does not match the length implied by its own extent count.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	/* Recovered log data is untrusted; reject a self-inconsistent size. */
	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}
0182 
/* Convert a generic log item back to the RUD that embeds it. */
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}
0187 
/*
 * Report the number of iovecs and the log space needed to log the given
 * RUD item: a single iovec holding the fixed-size done-item format.
 */
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}
0197 
/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * (Unlike the RUI, the RUD carries no extent array — only the id of
 * the intent it completes.)
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}
0219 
/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	/* Drop the RUD's reference to its intent, then free the RUD itself. */
	xfs_rui_release(rudp->rud_ruip);
	kmem_free(rudp->rud_item.li_lv_shadow);
	kmem_cache_free(xfs_rud_cache, rudp);
}
0235 
0236 static struct xfs_log_item *
0237 xfs_rud_item_intent(
0238     struct xfs_log_item *lip)
0239 {
0240     return &RUD_ITEM(lip)->rud_ruip->rui_item;
0241 }
0242 
/* Ops vector for RUD (intent-done) log items. */
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
	.iop_intent	= xfs_rud_item_intent,
};
0251 
/*
 * Allocate an RUD, tie it to the given RUI by id, and join it to the
 * transaction.  Allocation cannot fail (__GFP_NOFAIL).
 */
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	/* Record the intent id so log recovery can pair RUD with RUI. */
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}
0268 
/*
 * Set the map extent flags for this reverse mapping: encode the extent
 * state, the fork, and the intent type into the on-disk me_flags word.
 */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	/* Exactly one type bit is set; xfs_rui_validate_map() relies on it. */
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}
0311 
/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 *
 * @pcur caches the rmapbt cursor across consecutive updates so that runs
 * of updates against the same AG reuse one cursor.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}
0347 
0348 /* Sort rmap intents by AG. */
0349 static int
0350 xfs_rmap_update_diff_items(
0351     void                *priv,
0352     const struct list_head      *a,
0353     const struct list_head      *b)
0354 {
0355     struct xfs_mount        *mp = priv;
0356     struct xfs_rmap_intent      *ra;
0357     struct xfs_rmap_intent      *rb;
0358 
0359     ra = container_of(a, struct xfs_rmap_intent, ri_list);
0360     rb = container_of(b, struct xfs_rmap_intent, ri_list);
0361     return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
0362         XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
0363 }
0364 
/*
 * Log one rmap update in the intent item: claim the next free extent slot
 * in the RUI's format array and fill it from the in-core intent, marking
 * both the transaction and the RUI dirty.
 */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}
0393 
/*
 * Create an RUI covering @count deferred rmap updates, join it to the
 * transaction, and log every intent on @items into it.  When @sort is set,
 * intents are first ordered by AG to keep rmapbt locking well-ordered.
 */
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}
0414 
0415 /* Get an RUD so we can process all the deferred rmap updates. */
0416 static struct xfs_log_item *
0417 xfs_rmap_update_create_done(
0418     struct xfs_trans        *tp,
0419     struct xfs_log_item     *intent,
0420     unsigned int            count)
0421 {
0422     return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
0423 }
0424 
/*
 * Process a deferred rmap update: apply it to the rmapbt, log the result
 * in the RUD, and free the in-core intent.  @state carries the cached
 * btree cursor between consecutive items.
 */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	/* The intent is consumed whether or not the update succeeded. */
	kmem_cache_free(xfs_rmap_intent_cache, rmap);
	return error;
}
0445 
0446 /* Abort all pending RUIs. */
0447 STATIC void
0448 xfs_rmap_update_abort_intent(
0449     struct xfs_log_item *intent)
0450 {
0451     xfs_rui_release(RUI_ITEM(intent));
0452 }
0453 
0454 /* Cancel a deferred rmap update. */
0455 STATIC void
0456 xfs_rmap_update_cancel_item(
0457     struct list_head        *item)
0458 {
0459     struct xfs_rmap_intent      *rmap;
0460 
0461     rmap = container_of(item, struct xfs_rmap_intent, ri_list);
0462     kmem_cache_free(xfs_rmap_intent_cache, rmap);
0463 }
0464 
/* Deferred-operation dispatch table for rmap updates. */
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};
0474 
/*
 * Is this recovered RUI ok?  Validate every field of a map_extent that came
 * out of the log: the fs must actually have an rmapbt, the flags must be
 * known and encode exactly one intent type, the owner must be either a
 * special non-inode owner or a plausible inode number, and both the file
 * and fs extents must lie within valid ranges.
 */
static inline bool
xfs_rui_validate_map(
	struct xfs_mount		*mp,
	struct xfs_map_extent		*rmap)
{
	if (!xfs_has_rmapbt(mp))
		return false;

	if (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)
		return false;

	switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
	case XFS_RMAP_EXTENT_MAP:
	case XFS_RMAP_EXTENT_MAP_SHARED:
	case XFS_RMAP_EXTENT_UNMAP:
	case XFS_RMAP_EXTENT_UNMAP_SHARED:
	case XFS_RMAP_EXTENT_CONVERT:
	case XFS_RMAP_EXTENT_CONVERT_SHARED:
	case XFS_RMAP_EXTENT_ALLOC:
	case XFS_RMAP_EXTENT_FREE:
		break;
	default:
		return false;
	}

	if (!XFS_RMAP_NON_INODE_OWNER(rmap->me_owner) &&
	    !xfs_verify_ino(mp, rmap->me_owner))
		return false;

	if (!xfs_verify_fileext(mp, rmap->me_startoff, rmap->me_len))
		return false;

	return xfs_verify_fsbext(mp, rmap->me_startblock, rmap->me_len);
}
0510 
/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 *
 * Returns 0 after capturing any remaining deferred work and committing,
 * or a negative errno (cancelling the transaction) on failure.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		if (!xfs_rui_validate_map(mp,
					&ruip->rui_format.rui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&ruip->rui_format,
					sizeof(ruip->rui_format));
			return -EFSCORRUPTED;
		}
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	/* Replay each logged extent, decoding flags back to intent form. */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			/* Unreachable after validation above, but be safe. */
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					rmap, sizeof(*rmap));
		if (error)
			goto abort_error;

	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
0609 
0610 STATIC bool
0611 xfs_rui_item_match(
0612     struct xfs_log_item *lip,
0613     uint64_t        intent_id)
0614 {
0615     return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
0616 }
0617 
/*
 * Relog an intent item to push the log tail forward: log an RUD against the
 * old intent, then construct a fresh RUI carrying the same extents and add
 * it to the transaction so the work moves to the head of the log.
 */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

	/* Retire the old intent via an RUD in this transaction. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	/* Clone the extents into a brand-new RUI, already fully populated. */
	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}
0643 
/* Ops vector for RUI (intent) log items. */
static const struct xfs_item_ops xfs_rui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};
0654 
0655 /*
0656  * This routine is called to create an in-core extent rmap update
0657  * item from the rui format structure which was logged on disk.
0658  * It allocates an in-core rui, copies the extents from the format
0659  * structure into it, and adds the rui to the AIL with the given
0660  * LSN.
0661  */
0662 STATIC int
0663 xlog_recover_rui_commit_pass2(
0664     struct xlog         *log,
0665     struct list_head        *buffer_list,
0666     struct xlog_recover_item    *item,
0667     xfs_lsn_t           lsn)
0668 {
0669     int             error;
0670     struct xfs_mount        *mp = log->l_mp;
0671     struct xfs_rui_log_item     *ruip;
0672     struct xfs_rui_log_format   *rui_formatp;
0673 
0674     rui_formatp = item->ri_buf[0].i_addr;
0675 
0676     ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
0677     error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
0678     if (error) {
0679         xfs_rui_item_free(ruip);
0680         return error;
0681     }
0682     atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
0683     /*
0684      * Insert the intent into the AIL directly and drop one reference so
0685      * that finishing or canceling the work will drop the other.
0686      */
0687     xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
0688     xfs_rui_release(ruip);
0689     return 0;
0690 }
0691 
/* Log recovery dispatch entry for RUI items. */
const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};
0696 
0697 /*
0698  * This routine is called when an RUD format structure is found in a committed
0699  * transaction in the log. Its purpose is to cancel the corresponding RUI if it
0700  * was still in the log. To do this it searches the AIL for the RUI with an id
0701  * equal to that in the RUD format structure. If we find it we drop the RUD
0702  * reference, which removes the RUI from the AIL and frees it.
0703  */
0704 STATIC int
0705 xlog_recover_rud_commit_pass2(
0706     struct xlog         *log,
0707     struct list_head        *buffer_list,
0708     struct xlog_recover_item    *item,
0709     xfs_lsn_t           lsn)
0710 {
0711     struct xfs_rud_log_format   *rud_formatp;
0712 
0713     rud_formatp = item->ri_buf[0].i_addr;
0714     ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
0715 
0716     xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
0717     return 0;
0718 }
0719 
/* Log recovery dispatch entry for RUD items. */
const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};