Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Copyright (C) 2016 Oracle.  All Rights Reserved.
0004  * Author: Darrick J. Wong <darrick.wong@oracle.com>
0005  */
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_format.h"
0009 #include "xfs_log_format.h"
0010 #include "xfs_trans_resv.h"
0011 #include "xfs_bit.h"
0012 #include "xfs_shared.h"
0013 #include "xfs_mount.h"
0014 #include "xfs_defer.h"
0015 #include "xfs_trans.h"
0016 #include "xfs_trans_priv.h"
0017 #include "xfs_refcount_item.h"
0018 #include "xfs_log.h"
0019 #include "xfs_refcount.h"
0020 #include "xfs_error.h"
0021 #include "xfs_log_priv.h"
0022 #include "xfs_log_recover.h"
0023 
/* Slab caches for the refcount intent (CUI) and intent-done (CUD) items. */
struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

/* Forward declaration; ops table is defined near the bottom of this file. */
static const struct xfs_item_ops xfs_cui_item_ops;
0028 
/* Convert a generic log item back to its containing CUI log item. */
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}
0033 
0034 STATIC void
0035 xfs_cui_item_free(
0036     struct xfs_cui_log_item *cuip)
0037 {
0038     kmem_free(cuip->cui_item.li_lv_shadow);
0039     if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
0040         kmem_free(cuip);
0041     else
0042         kmem_cache_free(xfs_cui_cache, cuip);
0043 }
0044 
0045 /*
0046  * Freeing the CUI requires that we remove it from the AIL if it has already
0047  * been placed there. However, the CUI may not yet have been placed in the AIL
0048  * when called by xfs_cui_release() from CUD processing due to the ordering of
0049  * committed vs unpin operations in bulk insert operations. Hence the reference
0050  * count to ensure only the last caller frees the CUI.
0051  */
0052 STATIC void
0053 xfs_cui_release(
0054     struct xfs_cui_log_item *cuip)
0055 {
0056     ASSERT(atomic_read(&cuip->cui_refcount) > 0);
0057     if (!atomic_dec_and_test(&cuip->cui_refcount))
0058         return;
0059 
0060     xfs_trans_ail_delete(&cuip->cui_item, 0);
0061     xfs_cui_item_free(cuip);
0062 }
0063 
0064 
/*
 * Report the number of log iovecs and the number of bytes needed to log this
 * CUI: one vector covering the format structure plus its inline extent array.
 */
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}
0076 
/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every extent slot must have been filled before formatting. */
	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}
0101 
0102 /*
0103  * The unpin operation is the last place an CUI is manipulated in the log. It is
0104  * either inserted in the AIL or aborted in the event of a log I/O error. In
0105  * either case, the CUI transaction has been successfully committed to make it
0106  * this far. Therefore, we expect whoever committed the CUI to either construct
0107  * and commit the CUD or drop the CUD's reference in the event of error. Simply
0108  * drop the log's CUI reference now that the log is done with it.
0109  */
0110 STATIC void
0111 xfs_cui_item_unpin(
0112     struct xfs_log_item *lip,
0113     int         remove)
0114 {
0115     struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
0116 
0117     xfs_cui_release(cuip);
0118 }
0119 
/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}
0131 
/*
 * Allocate and initialize an cui item with the given number of extents.
 * Oversized items (nextents > XFS_CUI_MAX_FAST_EXTENTS) are heap allocated;
 * xfs_cui_item_free() mirrors this split when freeing.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)

{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
				0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	/* The item's own address serves as the unique intent id in the log. */
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	/* One reference for the log and one for the CUD. */
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}
0159 
/* Convert a generic log item back to its containing CUD log item. */
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}
0164 
/*
 * Report the number of log iovecs and bytes needed to log this CUD: a single
 * vector holding the fixed-size done-item format structure.
 */
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}
0174 
/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 * It is at this point that we assert that all of the extent
 * slots in the cud item have been filled.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}
0196 
/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	/* Drop the CUD's reference to its intent item, then free the CUD. */
	xfs_cui_release(cudp->cud_cuip);
	kmem_free(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}
0212 
/* Return the CUI log item that this CUD completes. */
static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}
0219 
/* Log item operations for the refcount-update done (CUD) item. */
static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};
0228 
/*
 * Allocate a CUD for the given CUI, link the two together, and attach the
 * CUD to the transaction.  The cud_cui_id ties the done item to its intent
 * in the log for recovery matching.
 */
static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}
0245 
/*
 * Finish an refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 *
 * On a partial completion, *new_fsb/*new_len describe the unfinished
 * remainder for the caller to requeue; *pcur caches the refcountbt cursor
 * across calls.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}
0279 
0280 /* Sort refcount intents by AG. */
0281 static int
0282 xfs_refcount_update_diff_items(
0283     void                *priv,
0284     const struct list_head      *a,
0285     const struct list_head      *b)
0286 {
0287     struct xfs_mount        *mp = priv;
0288     struct xfs_refcount_intent  *ra;
0289     struct xfs_refcount_intent  *rb;
0290 
0291     ra = container_of(a, struct xfs_refcount_intent, ri_list);
0292     rb = container_of(b, struct xfs_refcount_intent, ri_list);
0293     return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
0294         XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
0295 }
0296 
0297 /* Set the phys extent flags for this reverse mapping. */
0298 static void
0299 xfs_trans_set_refcount_flags(
0300     struct xfs_phys_extent      *refc,
0301     enum xfs_refcount_intent_type   type)
0302 {
0303     refc->pe_flags = 0;
0304     switch (type) {
0305     case XFS_REFCOUNT_INCREASE:
0306     case XFS_REFCOUNT_DECREASE:
0307     case XFS_REFCOUNT_ALLOC_COW:
0308     case XFS_REFCOUNT_FREE_COW:
0309         refc->pe_flags |= type;
0310         break;
0311     default:
0312         ASSERT(0);
0313     }
0314 }
0315 
/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*refc)
{
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}
0341 
/*
 * Create a CUI covering all the refcount intents on @items and attach it to
 * the transaction.  Optionally sorts the intents by AG first so they are
 * logged (and later finished) in AG order.
 */
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*refc;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(refc, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, refc);
	return &cuip->cui_item;
}
0362 
/* Get an CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}
0372 
/*
 * Process a deferred refcount update.  Returns -EAGAIN (with the intent
 * updated in place) when the transaction reservation ran out before the
 * whole extent was processed; the defer machinery will requeue it.
 */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
			&new_fsb, &new_aglen, state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		/* Only increase/decrease can be partially completed. */
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_cache_free(xfs_refcount_intent_cache, refc);
	return error;
}
0402 
/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}
0410 
0411 /* Cancel a deferred refcount update. */
0412 STATIC void
0413 xfs_refcount_update_cancel_item(
0414     struct list_head        *item)
0415 {
0416     struct xfs_refcount_intent  *refc;
0417 
0418     refc = container_of(item, struct xfs_refcount_intent, ri_list);
0419     kmem_cache_free(xfs_refcount_intent_cache, refc);
0420 }
0421 
/* Deferred-operation dispatch table for refcount updates. */
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};
0431 
/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	struct xfs_phys_extent		*refc)
{
	/* Refcount intents only make sense on reflink filesystems. */
	if (!xfs_has_reflink(mp))
		return false;

	/* No flag bits outside the defined set may be present. */
	if (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	/* The extent must lie entirely within the filesystem. */
	return xfs_verify_fsbext(mp, refc->pe_startblock, refc->pe_len);
}
0456 
/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_phys_extent		*refc;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	unsigned int			refc_type;
	bool				requeue_only = false;
	enum xfs_refcount_intent_type	type;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed in defer_ops and to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			/* Validated above, so this should be unreachable. */
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			/*
			 * A previous extent ran out of reservation; defer
			 * this one in full instead of processing it now.
			 */
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
				type, refc->pe_startblock, refc->pe_len,
				&new_fsb, &new_len, &rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					refc, sizeof(*refc));
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	/* Capture remaining deferred work and commit the transaction. */
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
0579 
/* Does this CUI's log-format id match the given recovered intent id? */
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}
0587 
/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*extp;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	extp = CUI_ITEM(intent)->cui_format.cui_extents;

	/* Mark the old intent done... */
	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	/* ...and log a fresh CUI carrying the same extents. */
	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}
0613 
/* Log item operations for the refcount-update intent (CUI) item. */
static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};
0624 
0625 /*
0626  * Copy an CUI format buffer from the given buf, and into the destination
0627  * CUI format structure.  The CUI/CUD items were designed not to need any
0628  * special alignment handling.
0629  */
0630 static int
0631 xfs_cui_copy_format(
0632     struct xfs_log_iovec        *buf,
0633     struct xfs_cui_log_format   *dst_cui_fmt)
0634 {
0635     struct xfs_cui_log_format   *src_cui_fmt;
0636     uint                len;
0637 
0638     src_cui_fmt = buf->i_addr;
0639     len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
0640 
0641     if (buf->i_len == len) {
0642         memcpy(dst_cui_fmt, src_cui_fmt, len);
0643         return 0;
0644     }
0645     XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
0646     return -EFSCORRUPTED;
0647 }
0648 
0649 /*
0650  * This routine is called to create an in-core extent refcount update
0651  * item from the cui format structure which was logged on disk.
0652  * It allocates an in-core cui, copies the extents from the format
0653  * structure into it, and adds the cui to the AIL with the given
0654  * LSN.
0655  */
0656 STATIC int
0657 xlog_recover_cui_commit_pass2(
0658     struct xlog         *log,
0659     struct list_head        *buffer_list,
0660     struct xlog_recover_item    *item,
0661     xfs_lsn_t           lsn)
0662 {
0663     int             error;
0664     struct xfs_mount        *mp = log->l_mp;
0665     struct xfs_cui_log_item     *cuip;
0666     struct xfs_cui_log_format   *cui_formatp;
0667 
0668     cui_formatp = item->ri_buf[0].i_addr;
0669 
0670     cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
0671     error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
0672     if (error) {
0673         xfs_cui_item_free(cuip);
0674         return error;
0675     }
0676     atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
0677     /*
0678      * Insert the intent into the AIL directly and drop one reference so
0679      * that finishing or canceling the work will drop the other.
0680      */
0681     xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
0682     xfs_cui_release(cuip);
0683     return 0;
0684 }
0685 
/* Log recovery dispatch for CUI items. */
const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};
0690 
/*
 * This routine is called when an CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	/* CUDs are fixed size; reject anything else as corruption. */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}
0716 
/* Log recovery dispatch for CUD items. */
const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};