0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
0004  * All Rights Reserved.
0005  */
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_log_format.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_mount.h"
0013 #include "xfs_btree.h"
0014 #include "xfs_btree_staging.h"
0015 #include "xfs_alloc_btree.h"
0016 #include "xfs_alloc.h"
0017 #include "xfs_extent_busy.h"
0018 #include "xfs_error.h"
0019 #include "xfs_trace.h"
0020 #include "xfs_trans.h"
0021 #include "xfs_ag.h"
0022 
/* Slab cache for allocation btree cursors, shared by the bnobt and cntbt. */
static struct kmem_cache    *xfs_allocbt_cur_cache;
0024 
/*
 * Duplicate this cursor: a new cursor on the same tree, sharing the same
 * mount, transaction, AGF buffer, perag and btree type.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
    struct xfs_btree_cur    *cur)
{
    return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
            cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
0032 
/*
 * Point the AGF at a new btree root block and adjust the recorded tree
 * height by @inc (+1 on a root split, -1 on a root join).  Both the ondisk
 * AGF and the in-core perag level are updated, then the AGF is logged.
 */
STATIC void
xfs_allocbt_set_root(
    struct xfs_btree_cur        *cur,
    const union xfs_btree_ptr   *ptr,
    int             inc)
{
    struct xfs_buf      *agbp = cur->bc_ag.agbp;
    struct xfs_agf      *agf = agbp->b_addr;
    int         btnum = cur->bc_btnum;

    /* A zero root pointer would mean "no tree" - never valid here. */
    ASSERT(ptr->s != 0);

    agf->agf_roots[btnum] = ptr->s;
    be32_add_cpu(&agf->agf_levels[btnum], inc);
    cur->bc_ag.pag->pagf_levels[btnum] += inc;

    xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
0051 
/*
 * Allocate a new btree block from the AG free list.  On success *stat is 1
 * and *new holds the AG block number; *stat is 0 if the freelist is empty.
 */
STATIC int
xfs_allocbt_alloc_block(
    struct xfs_btree_cur        *cur,
    const union xfs_btree_ptr   *start,
    union xfs_btree_ptr     *new,
    int             *stat)
{
    int         error;
    xfs_agblock_t       bno;

    /* Allocate the new block from the freelist. If we can't, give up.  */
    error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
            cur->bc_ag.agbp, &bno, 1);
    if (error)
        return error;

    if (bno == NULLAGBLOCK) {
        /* Freelist exhausted: not an error, just "no block available". */
        *stat = 0;
        return 0;
    }

    /* Account the block to the allocbt and clear any busy state on it. */
    atomic64_inc(&cur->bc_mp->m_allocbt_blks);
    xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

    new->s = cpu_to_be32(bno);

    *stat = 1;
    return 0;
}
0081 
/*
 * Return a btree block to the AG free list and mark the extent busy so it
 * cannot be reallocated before this transaction commits.
 */
STATIC int
xfs_allocbt_free_block(
    struct xfs_btree_cur    *cur,
    struct xfs_buf      *bp)
{
    struct xfs_buf      *agbp = cur->bc_ag.agbp;
    xfs_agblock_t       bno;
    int         error;

    bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
    error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
            bno, 1);
    if (error)
        return error;

    /* Undo the allocbt block accounting done at alloc time. */
    atomic64_dec(&cur->bc_mp->m_allocbt_blks);
    /* Skip discard: the block goes back on the AGFL, not to free space. */
    xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
                  XFS_EXTENT_BUSY_SKIP_DISCARD);
    return 0;
}
0102 
/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
    struct xfs_btree_cur        *cur,
    const struct xfs_btree_block    *block,
    const union xfs_btree_rec   *rec,
    int             ptr,
    int             reason)
{
    struct xfs_agf      *agf = cur->bc_ag.agbp->b_addr;
    struct xfs_perag    *pag;
    __be32          len;
    int         numrecs;

    /* Only the by-size (cnt) btree tracks the longest free extent. */
    ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

    switch (reason) {
    case LASTREC_UPDATE:
        /*
         * If this is the last leaf block and it's the last record,
         * then update the size of the longest extent in the AG.
         */
        if (ptr != xfs_btree_get_numrecs(block))
            return;
        len = rec->alloc.ar_blockcount;
        break;
    case LASTREC_INSREC:
        /* Ignore inserts that don't beat the current longest extent. */
        if (be32_to_cpu(rec->alloc.ar_blockcount) <=
            be32_to_cpu(agf->agf_longest))
            return;
        len = rec->alloc.ar_blockcount;
        break;
    case LASTREC_DELREC:
        /* Only care if the record just removed was the last one. */
        numrecs = xfs_btree_get_numrecs(block);
        if (ptr <= numrecs)
            return;
        ASSERT(ptr == numrecs + 1);

        if (numrecs) {
            xfs_alloc_rec_t *rrp;

            /* The new longest extent is whatever record is now last. */
            rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
            len = rrp->ar_blockcount;
        } else {
            len = 0;
        }

        break;
    default:
        ASSERT(0);
        return;
    }

    /* Propagate to the ondisk AGF, the in-core perag, and the log. */
    agf->agf_longest = len;
    pag = cur->bc_ag.agbp->b_pag;
    pag->pagf_longest = be32_to_cpu(len);
    xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}
0163 
0164 STATIC int
0165 xfs_allocbt_get_minrecs(
0166     struct xfs_btree_cur    *cur,
0167     int         level)
0168 {
0169     return cur->bc_mp->m_alloc_mnr[level != 0];
0170 }
0171 
0172 STATIC int
0173 xfs_allocbt_get_maxrecs(
0174     struct xfs_btree_cur    *cur,
0175     int         level)
0176 {
0177     return cur->bc_mp->m_alloc_mxr[level != 0];
0178 }
0179 
0180 STATIC void
0181 xfs_allocbt_init_key_from_rec(
0182     union xfs_btree_key     *key,
0183     const union xfs_btree_rec   *rec)
0184 {
0185     key->alloc.ar_startblock = rec->alloc.ar_startblock;
0186     key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
0187 }
0188 
0189 STATIC void
0190 xfs_bnobt_init_high_key_from_rec(
0191     union xfs_btree_key     *key,
0192     const union xfs_btree_rec   *rec)
0193 {
0194     __u32               x;
0195 
0196     x = be32_to_cpu(rec->alloc.ar_startblock);
0197     x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
0198     key->alloc.ar_startblock = cpu_to_be32(x);
0199     key->alloc.ar_blockcount = 0;
0200 }
0201 
0202 STATIC void
0203 xfs_cntbt_init_high_key_from_rec(
0204     union xfs_btree_key     *key,
0205     const union xfs_btree_rec   *rec)
0206 {
0207     key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
0208     key->alloc.ar_startblock = 0;
0209 }
0210 
0211 STATIC void
0212 xfs_allocbt_init_rec_from_cur(
0213     struct xfs_btree_cur    *cur,
0214     union xfs_btree_rec *rec)
0215 {
0216     rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
0217     rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
0218 }
0219 
/*
 * Load the root block pointer for this btree from the cached AGF.
 */
STATIC void
xfs_allocbt_init_ptr_from_cur(
    struct xfs_btree_cur    *cur,
    union xfs_btree_ptr *ptr)
{
    struct xfs_agf      *agf = cur->bc_ag.agbp->b_addr;

    /* The AGF must belong to the AG this cursor is attached to. */
    ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

    ptr->s = agf->agf_roots[cur->bc_btnum];
}
0231 
0232 STATIC int64_t
0233 xfs_bnobt_key_diff(
0234     struct xfs_btree_cur        *cur,
0235     const union xfs_btree_key   *key)
0236 {
0237     struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
0238     const struct xfs_alloc_rec  *kp = &key->alloc;
0239 
0240     return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
0241 }
0242 
0243 STATIC int64_t
0244 xfs_cntbt_key_diff(
0245     struct xfs_btree_cur        *cur,
0246     const union xfs_btree_key   *key)
0247 {
0248     struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
0249     const struct xfs_alloc_rec  *kp = &key->alloc;
0250     int64_t             diff;
0251 
0252     diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
0253     if (diff)
0254         return diff;
0255 
0256     return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
0257 }
0258 
/*
 * Compare two bnobt keys (ordered by startblock only); returns <0/0/>0
 * like memcmp.  The cast forces the subtraction to happen in a signed
 * 64-bit type so a negative difference survives the widening.
 */
STATIC int64_t
xfs_bnobt_diff_two_keys(
    struct xfs_btree_cur        *cur,
    const union xfs_btree_key   *k1,
    const union xfs_btree_key   *k2)
{
    return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
              be32_to_cpu(k2->alloc.ar_startblock);
}
0268 
0269 STATIC int64_t
0270 xfs_cntbt_diff_two_keys(
0271     struct xfs_btree_cur        *cur,
0272     const union xfs_btree_key   *k1,
0273     const union xfs_btree_key   *k2)
0274 {
0275     int64_t             diff;
0276 
0277     diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
0278         be32_to_cpu(k2->alloc.ar_blockcount);
0279     if (diff)
0280         return diff;
0281 
0282     return  be32_to_cpu(k1->alloc.ar_startblock) -
0283         be32_to_cpu(k2->alloc.ar_startblock);
0284 }
0285 
/*
 * Structural verifier shared by the bnobt and cntbt: check the block
 * magic, the v5 short-block header (CRC filesystems only), and that the
 * block's level is sane for the tree it belongs to.  Returns NULL if the
 * block looks fine, or the address of the failing check.
 */
static xfs_failaddr_t
xfs_allocbt_verify(
    struct xfs_buf      *bp)
{
    struct xfs_mount    *mp = bp->b_mount;
    struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
    struct xfs_perag    *pag = bp->b_pag;
    xfs_failaddr_t      fa;
    unsigned int        level;
    xfs_btnum_t     btnum = XFS_BTNUM_BNOi;

    if (!xfs_verify_magic(bp, block->bb_magic))
        return __this_address;

    if (xfs_has_crc(mp)) {
        fa = xfs_btree_sblock_v5hdr_verify(bp);
        if (fa)
            return fa;
    }

    /*
     * The perag may not be attached during grow operations or fully
     * initialized from the AGF during log recovery. Therefore we can only
     * check against maximum tree depth from those contexts.
     *
     * Otherwise check against the per-tree limit. Peek at one of the
     * verifier magic values to determine the type of tree we're verifying
     * against.
     */
    level = be16_to_cpu(block->bb_level);
    if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
        btnum = XFS_BTNUM_CNTi;
    if (pag && pag->pagf_init) {
        if (level >= pag->pagf_levels[btnum])
            return __this_address;
    } else if (level >= mp->m_alloc_maxlevels)
        return __this_address;

    /* Generic short-block checks, with the right maxrecs for this level. */
    return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
0326 
0327 static void
0328 xfs_allocbt_read_verify(
0329     struct xfs_buf  *bp)
0330 {
0331     xfs_failaddr_t  fa;
0332 
0333     if (!xfs_btree_sblock_verify_crc(bp))
0334         xfs_verifier_error(bp, -EFSBADCRC, __this_address);
0335     else {
0336         fa = xfs_allocbt_verify(bp);
0337         if (fa)
0338             xfs_verifier_error(bp, -EFSCORRUPTED, fa);
0339     }
0340 
0341     if (bp->b_error)
0342         trace_xfs_btree_corrupt(bp, _RET_IP_);
0343 }
0344 
0345 static void
0346 xfs_allocbt_write_verify(
0347     struct xfs_buf  *bp)
0348 {
0349     xfs_failaddr_t  fa;
0350 
0351     fa = xfs_allocbt_verify(bp);
0352     if (fa) {
0353         trace_xfs_btree_corrupt(bp, _RET_IP_);
0354         xfs_verifier_error(bp, -EFSCORRUPTED, fa);
0355         return;
0356     }
0357     xfs_btree_sblock_calc_crc(bp);
0358 
0359 }
0360 
/* Buffer verifier ops for by-block-number (bnobt) free space btree blocks. */
const struct xfs_buf_ops xfs_bnobt_buf_ops = {
    .name = "xfs_bnobt",
    .magic = { cpu_to_be32(XFS_ABTB_MAGIC),
           cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
    .verify_read = xfs_allocbt_read_verify,
    .verify_write = xfs_allocbt_write_verify,
    .verify_struct = xfs_allocbt_verify,
};
0369 
/* Buffer verifier ops for by-size (cntbt) free space btree blocks. */
const struct xfs_buf_ops xfs_cntbt_buf_ops = {
    .name = "xfs_cntbt",
    .magic = { cpu_to_be32(XFS_ABTC_MAGIC),
           cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
    .verify_read = xfs_allocbt_read_verify,
    .verify_write = xfs_allocbt_write_verify,
    .verify_struct = xfs_allocbt_verify,
};
0378 
0379 STATIC int
0380 xfs_bnobt_keys_inorder(
0381     struct xfs_btree_cur        *cur,
0382     const union xfs_btree_key   *k1,
0383     const union xfs_btree_key   *k2)
0384 {
0385     return be32_to_cpu(k1->alloc.ar_startblock) <
0386            be32_to_cpu(k2->alloc.ar_startblock);
0387 }
0388 
0389 STATIC int
0390 xfs_bnobt_recs_inorder(
0391     struct xfs_btree_cur        *cur,
0392     const union xfs_btree_rec   *r1,
0393     const union xfs_btree_rec   *r2)
0394 {
0395     return be32_to_cpu(r1->alloc.ar_startblock) +
0396         be32_to_cpu(r1->alloc.ar_blockcount) <=
0397         be32_to_cpu(r2->alloc.ar_startblock);
0398 }
0399 
0400 STATIC int
0401 xfs_cntbt_keys_inorder(
0402     struct xfs_btree_cur        *cur,
0403     const union xfs_btree_key   *k1,
0404     const union xfs_btree_key   *k2)
0405 {
0406     return be32_to_cpu(k1->alloc.ar_blockcount) <
0407         be32_to_cpu(k2->alloc.ar_blockcount) ||
0408         (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
0409          be32_to_cpu(k1->alloc.ar_startblock) <
0410          be32_to_cpu(k2->alloc.ar_startblock));
0411 }
0412 
0413 STATIC int
0414 xfs_cntbt_recs_inorder(
0415     struct xfs_btree_cur        *cur,
0416     const union xfs_btree_rec   *r1,
0417     const union xfs_btree_rec   *r2)
0418 {
0419     return be32_to_cpu(r1->alloc.ar_blockcount) <
0420         be32_to_cpu(r2->alloc.ar_blockcount) ||
0421         (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
0422          be32_to_cpu(r1->alloc.ar_startblock) <
0423          be32_to_cpu(r2->alloc.ar_startblock));
0424 }
0425 
/* Btree operations vector for the by-block-number (bnobt) tree. */
static const struct xfs_btree_ops xfs_bnobt_ops = {
    .rec_len        = sizeof(xfs_alloc_rec_t),
    .key_len        = sizeof(xfs_alloc_key_t),

    .dup_cursor     = xfs_allocbt_dup_cursor,
    .set_root       = xfs_allocbt_set_root,
    .alloc_block        = xfs_allocbt_alloc_block,
    .free_block     = xfs_allocbt_free_block,
    .update_lastrec     = xfs_allocbt_update_lastrec,
    .get_minrecs        = xfs_allocbt_get_minrecs,
    .get_maxrecs        = xfs_allocbt_get_maxrecs,
    .init_key_from_rec  = xfs_allocbt_init_key_from_rec,
    .init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
    .init_rec_from_cur  = xfs_allocbt_init_rec_from_cur,
    .init_ptr_from_cur  = xfs_allocbt_init_ptr_from_cur,
    .key_diff       = xfs_bnobt_key_diff,
    .buf_ops        = &xfs_bnobt_buf_ops,
    .diff_two_keys      = xfs_bnobt_diff_two_keys,
    .keys_inorder       = xfs_bnobt_keys_inorder,
    .recs_inorder       = xfs_bnobt_recs_inorder,
};
0447 
/* Btree operations vector for the by-size (cntbt) tree. */
static const struct xfs_btree_ops xfs_cntbt_ops = {
    .rec_len        = sizeof(xfs_alloc_rec_t),
    .key_len        = sizeof(xfs_alloc_key_t),

    .dup_cursor     = xfs_allocbt_dup_cursor,
    .set_root       = xfs_allocbt_set_root,
    .alloc_block        = xfs_allocbt_alloc_block,
    .free_block     = xfs_allocbt_free_block,
    .update_lastrec     = xfs_allocbt_update_lastrec,
    .get_minrecs        = xfs_allocbt_get_minrecs,
    .get_maxrecs        = xfs_allocbt_get_maxrecs,
    .init_key_from_rec  = xfs_allocbt_init_key_from_rec,
    .init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
    .init_rec_from_cur  = xfs_allocbt_init_rec_from_cur,
    .init_ptr_from_cur  = xfs_allocbt_init_ptr_from_cur,
    .key_diff       = xfs_cntbt_key_diff,
    .buf_ops        = &xfs_cntbt_buf_ops,
    .diff_two_keys      = xfs_cntbt_diff_two_keys,
    .keys_inorder       = xfs_cntbt_keys_inorder,
    .recs_inorder       = xfs_cntbt_recs_inorder,
};
0469 
/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
    struct xfs_mount    *mp,
    struct xfs_trans    *tp,
    struct xfs_perag    *pag,
    xfs_btnum_t     btnum)
{
    struct xfs_btree_cur    *cur;

    ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

    cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
            xfs_allocbt_cur_cache);
    cur->bc_ag.abt.active = false;

    if (btnum == XFS_BTNUM_CNT) {
        cur->bc_ops = &xfs_cntbt_ops;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
        /* Only the cntbt maintains the AGF's longest-extent field. */
        cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
    } else {
        cur->bc_ops = &xfs_bnobt_ops;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
    }

    /* take a reference for the cursor */
    atomic_inc(&pag->pag_ref);
    cur->bc_ag.pag = pag;

    if (xfs_has_crc(mp))
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

    return cur;
}
0504 
/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *          /* new alloc btree cursor */
xfs_allocbt_init_cursor(
    struct xfs_mount    *mp,        /* file system mount point */
    struct xfs_trans    *tp,        /* transaction pointer */
    struct xfs_buf      *agbp,      /* buffer for agf structure */
    struct xfs_perag    *pag,
    xfs_btnum_t     btnum)      /* btree identifier */
{
    struct xfs_agf      *agf = agbp->b_addr;
    struct xfs_btree_cur    *cur;

    cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
    /* Seed the cursor's tree height from the ondisk AGF. */
    if (btnum == XFS_BTNUM_CNT)
        cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
    else
        cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

    cur->bc_ag.agbp = agbp;

    return cur;
}
0529 
/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
    struct xfs_mount    *mp,
    struct xbtree_afakeroot *afake,
    struct xfs_perag    *pag,
    xfs_btnum_t     btnum)
{
    struct xfs_btree_cur    *cur;

    /* No transaction: staged trees are built outside of one. */
    cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
    xfs_btree_stage_afakeroot(cur, afake);
    return cur;
}
0544 
/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
    struct xfs_btree_cur    *cur,
    struct xfs_trans    *tp,
    struct xfs_buf      *agbp)
{
    struct xfs_agf      *agf = agbp->b_addr;
    struct xbtree_afakeroot *afake = cur->bc_ag.afake;

    ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

    /* Point the AGF at the staged root and log the change. */
    agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
    agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
    xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

    /* Turn the staging cursor into a regular cursor on the new tree. */
    if (cur->bc_btnum == XFS_BTNUM_BNO) {
        xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
    } else {
        cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
        xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
    }
}
0571 
0572 /* Calculate number of records in an alloc btree block. */
0573 static inline unsigned int
0574 xfs_allocbt_block_maxrecs(
0575     unsigned int        blocklen,
0576     bool            leaf)
0577 {
0578     if (leaf)
0579         return blocklen / sizeof(xfs_alloc_rec_t);
0580     return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
0581 }
0582 
/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
    struct xfs_mount    *mp,
    int         blocklen,
    int         leaf)
{
    /* Exclude the block header before dividing into record slots. */
    return xfs_allocbt_block_maxrecs(blocklen - XFS_ALLOC_BLOCK_LEN(mp),
            leaf);
}
0595 
/*
 * Free space btrees are at their largest when every other block is free.
 * That gives one free-extent record per pair of blocks.
 */
#define XFS_MAX_FREESP_RECORDS  ((XFS_MAX_AG_BLOCKS + 1) / 2)
0598 
/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
    unsigned int        minrecs[2];
    unsigned int        blocklen;

    /* Worst case: the smallest supported block size and largest header. */
    blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
               XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

    /* Tallest tree happens when every block is only half full. */
    minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
    minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

    return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}
0614 
/* Calculate the freespace btree size (in blocks) needed for @len records. */
xfs_extlen_t
xfs_allocbt_calc_size(
    struct xfs_mount    *mp,
    unsigned long long  len)
{
    /* Worst-case sizing uses the minimum records per block. */
    return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}
0623 
/*
 * Create the slab cache for allocation btree cursors, sized for the
 * maximum possible ondisk tree height.  Returns 0 or -ENOMEM.
 */
int __init
xfs_allocbt_init_cur_cache(void)
{
    xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
            xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
            0, 0, NULL);

    if (!xfs_allocbt_cur_cache)
        return -ENOMEM;
    return 0;
}
0635 
/* Tear down the allocbt cursor cache; safe to call if it was never created. */
void
xfs_allocbt_destroy_cur_cache(void)
{
    kmem_cache_destroy(xfs_allocbt_cur_cache);
    /* Clear the pointer so a stale cache is never reused. */
    xfs_allocbt_cur_cache = NULL;
}