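/*
 * Reference count (refcount) btree implementation.  This per-AG btree tracks
 * the reference counts of shared extents so that reflink and copy-on-write
 * operations know which blocks are shared and when they may be freed.
 */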
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache *xfs_refcountbt_cur_cache;

static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
        struct xfs_btree_cur *cur)
{
        return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_ag.agbp, cur->bc_ag.pag);
}

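/* Point the AGF at a new refcount btree root and update the tree height. */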
STATIC void
xfs_refcountbt_set_root(
        struct xfs_btree_cur *cur,
        const union xfs_btree_ptr *ptr,
        int inc)
{
        struct xfs_buf *agbp = cur->bc_ag.agbp;
        struct xfs_agf *agf = agbp->b_addr;
        struct xfs_perag *pag = agbp->b_pag;

        ASSERT(ptr->s != 0);

        agf->agf_refcount_root = ptr->s;
        be32_add_cpu(&agf->agf_refcount_level, inc);
        pag->pagf_refcount_level += inc;

        xfs_alloc_log_agf(cur->bc_tp, agbp,
                        XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

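/*
 * Allocate a new refcount btree block near the btree's home block, charging
 * it to the per-AG metadata reservation.
 */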
STATIC int
xfs_refcountbt_alloc_block(
        struct xfs_btree_cur *cur,
        const union xfs_btree_ptr *start,
        union xfs_btree_ptr *new,
        int *stat)
{
        struct xfs_buf *agbp = cur->bc_ag.agbp;
        struct xfs_agf *agf = agbp->b_addr;
        struct xfs_alloc_arg args;
        int error;

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
        args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
                        xfs_refc_block(args.mp));
        args.oinfo = XFS_RMAP_OINFO_REFC;
        args.minlen = args.maxlen = args.prod = 1;
        args.resv = XFS_AG_RESV_METADATA;

        error = xfs_alloc_vextent(&args);
        if (error)
                goto out_error;
        trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
                        args.agbno, 1);
        if (args.fsbno == NULLFSBLOCK) {
                *stat = 0;
                return 0;
        }
        ASSERT(args.agno == cur->bc_ag.pag->pag_agno);
        ASSERT(args.len == 1);

        new->s = cpu_to_be32(args.agbno);
        be32_add_cpu(&agf->agf_refcount_blocks, 1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

        *stat = 1;
        return 0;

out_error:
        return error;
}

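/* Free a refcount btree block back to the per-AG metadata reservation. */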
STATIC int
xfs_refcountbt_free_block(
        struct xfs_btree_cur *cur,
        struct xfs_buf *bp)
{
        struct xfs_mount *mp = cur->bc_mp;
        struct xfs_buf *agbp = cur->bc_ag.agbp;
        struct xfs_agf *agf = agbp->b_addr;
        xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
        int error;

        trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
                        XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
        be32_add_cpu(&agf->agf_refcount_blocks, -1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
        error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
                        XFS_AG_RESV_METADATA);
        return error;
}

STATIC int
xfs_refcountbt_get_minrecs(
        struct xfs_btree_cur *cur,
        int level)
{
        return cur->bc_mp->m_refc_mnr[level != 0];
}

STATIC int
xfs_refcountbt_get_maxrecs(
        struct xfs_btree_cur *cur,
        int level)
{
        return cur->bc_mp->m_refc_mxr[level != 0];
}

STATIC void
xfs_refcountbt_init_key_from_rec(
        union xfs_btree_key *key,
        const union xfs_btree_rec *rec)
{
        key->refc.rc_startblock = rec->refc.rc_startblock;
}

STATIC void
xfs_refcountbt_init_high_key_from_rec(
        union xfs_btree_key *key,
        const union xfs_btree_rec *rec)
{
        __u32 x;

        x = be32_to_cpu(rec->refc.rc_startblock);
        x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
        key->refc.rc_startblock = cpu_to_be32(x);
}

STATIC void
xfs_refcountbt_init_rec_from_cur(
        struct xfs_btree_cur *cur,
        union xfs_btree_rec *rec)
{
        rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
        rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
        rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

STATIC void
xfs_refcountbt_init_ptr_from_cur(
        struct xfs_btree_cur *cur,
        union xfs_btree_ptr *ptr)
{
        struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;

        ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_refcount_root;
}

STATIC int64_t
xfs_refcountbt_key_diff(
        struct xfs_btree_cur *cur,
        const union xfs_btree_key *key)
{
        struct xfs_refcount_irec *rec = &cur->bc_rec.rc;
        const struct xfs_refcount_key *kp = &key->refc;

        return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

STATIC int64_t
xfs_refcountbt_diff_two_keys(
        struct xfs_btree_cur *cur,
        const union xfs_btree_key *k1,
        const union xfs_btree_key *k2)
{
        return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
                        be32_to_cpu(k2->refc.rc_startblock);
}

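/* Verify the structure of an on-disk refcount btree block. */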
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
        struct xfs_buf *bp)
{
        struct xfs_mount *mp = bp->b_mount;
        struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag *pag = bp->b_pag;
        xfs_failaddr_t fa;
        unsigned int level;

        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (!xfs_has_reflink(mp))
                return __this_address;
        fa = xfs_btree_sblock_v5hdr_verify(bp);
        if (fa)
                return fa;

        level = be16_to_cpu(block->bb_level);
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_refcount_level)
                        return __this_address;
        } else if (level >= mp->m_refc_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

STATIC void
xfs_refcountbt_read_verify(
        struct xfs_buf *bp)
{
        xfs_failaddr_t fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_refcountbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

STATIC void
xfs_refcountbt_write_verify(
        struct xfs_buf *bp)
{
        xfs_failaddr_t fa;

        fa = xfs_refcountbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
        .name = "xfs_refcountbt",
        .magic = { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
        .verify_read = xfs_refcountbt_read_verify,
        .verify_write = xfs_refcountbt_write_verify,
        .verify_struct = xfs_refcountbt_verify,
};

STATIC int
xfs_refcountbt_keys_inorder(
        struct xfs_btree_cur *cur,
        const union xfs_btree_key *k1,
        const union xfs_btree_key *k2)
{
        return be32_to_cpu(k1->refc.rc_startblock) <
                        be32_to_cpu(k2->refc.rc_startblock);
}

STATIC int
xfs_refcountbt_recs_inorder(
        struct xfs_btree_cur *cur,
        const union xfs_btree_rec *r1,
        const union xfs_btree_rec *r2)
{
        return be32_to_cpu(r1->refc.rc_startblock) +
                        be32_to_cpu(r1->refc.rc_blockcount) <=
                        be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
        .rec_len = sizeof(struct xfs_refcount_rec),
        .key_len = sizeof(struct xfs_refcount_key),

        .dup_cursor = xfs_refcountbt_dup_cursor,
        .set_root = xfs_refcountbt_set_root,
        .alloc_block = xfs_refcountbt_alloc_block,
        .free_block = xfs_refcountbt_free_block,
        .get_minrecs = xfs_refcountbt_get_minrecs,
        .get_maxrecs = xfs_refcountbt_get_maxrecs,
        .init_key_from_rec = xfs_refcountbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_refcountbt_init_high_key_from_rec,
        .init_rec_from_cur = xfs_refcountbt_init_rec_from_cur,
        .init_ptr_from_cur = xfs_refcountbt_init_ptr_from_cur,
        .key_diff = xfs_refcountbt_key_diff,
        .buf_ops = &xfs_refcountbt_buf_ops,
        .diff_two_keys = xfs_refcountbt_diff_two_keys,
        .keys_inorder = xfs_refcountbt_keys_inorder,
        .recs_inorder = xfs_refcountbt_recs_inorder,
};

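/*
 * Initialize the parts of a refcount btree cursor that are common to both
 * regular and staging cursors.
 */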
static struct xfs_btree_cur *
xfs_refcountbt_init_common(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        struct xfs_perag *pag)
{
        struct xfs_btree_cur *cur;

        ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);

        cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
                        mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

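        /* take a reference for the cursor */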
        atomic_inc(&pag->pag_ref);
        cur->bc_ag.pag = pag;

        cur->bc_ag.refc.nr_ops = 0;
        cur->bc_ag.refc.shape_changes = 0;
        cur->bc_ops = &xfs_refcountbt_ops;
        return cur;
}

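/* Create a refcount btree cursor backed by an AGF buffer. */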
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        struct xfs_buf *agbp,
        struct xfs_perag *pag)
{
        struct xfs_agf *agf = agbp->b_addr;
        struct xfs_btree_cur *cur;

        cur = xfs_refcountbt_init_common(mp, tp, pag);
        cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
        cur->bc_ag.agbp = agbp;
        return cur;
}

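/* Create a refcount btree cursor with a fake root for staging. */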
struct xfs_btree_cur *
xfs_refcountbt_stage_cursor(
        struct xfs_mount *mp,
        struct xbtree_afakeroot *afake,
        struct xfs_perag *pag)
{
        struct xfs_btree_cur *cur;

        cur = xfs_refcountbt_init_common(mp, NULL, pag);
        xfs_btree_stage_afakeroot(cur, afake);
        return cur;
}

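/*
 * Install the root of a staged refcount btree into the AGF and convert the
 * staging cursor into a regular one that uses the committed root.
 */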
void
xfs_refcountbt_commit_staged_btree(
        struct xfs_btree_cur *cur,
        struct xfs_trans *tp,
        struct xfs_buf *agbp)
{
        struct xfs_agf *agf = agbp->b_addr;
        struct xbtree_afakeroot *afake = cur->bc_ag.afake;

        ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

        agf->agf_refcount_root = cpu_to_be32(afake->af_root);
        agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
        agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
        xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
                                    XFS_AGF_REFCOUNT_ROOT |
                                    XFS_AGF_REFCOUNT_LEVEL);
        xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
}

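/*
 * Calculate the number of records (leaf) or key/pointer pairs (node) that
 * fit in a block with @blocklen bytes of payload.
 */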
static inline unsigned int
xfs_refcountbt_block_maxrecs(
        unsigned int blocklen,
        bool leaf)
{
        if (leaf)
                return blocklen / sizeof(struct xfs_refcount_rec);
        return blocklen / (sizeof(struct xfs_refcount_key) +
                           sizeof(xfs_refcount_ptr_t));
}

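/*
 * Calculate the number of records in a refcount btree block, subtracting the
 * space used by the block header.
 */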
int
xfs_refcountbt_maxrecs(
        int blocklen,
        bool leaf)
{
        blocklen -= XFS_REFCOUNT_BLOCK_LEN;
        return xfs_refcountbt_block_maxrecs(blocklen, leaf);
}

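/* Compute the maximum height of a refcount btree supported by the on-disk format. */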
unsigned int
xfs_refcountbt_maxlevels_ondisk(void)
{
        unsigned int minrecs[2];
        unsigned int blocklen;

        blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

        minrecs[0] = xfs_refcountbt_block_maxrecs(blocklen, true) / 2;
        minrecs[1] = xfs_refcountbt_block_maxrecs(blocklen, false) / 2;

        return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_CRC_AG_BLOCKS);
}

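/* Compute the maximum height of a refcount btree for this filesystem. */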
void
xfs_refcountbt_compute_maxlevels(
        struct xfs_mount *mp)
{
        if (!xfs_has_reflink(mp)) {
                mp->m_refc_maxlevels = 0;
                return;
        }

        mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
                        mp->m_refc_mnr, mp->m_sb.sb_agblocks);
        ASSERT(mp->m_refc_maxlevels <= xfs_refcountbt_maxlevels_ondisk());
}

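/* Calculate the number of blocks needed to store a given number of refcount records. */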
xfs_extlen_t
xfs_refcountbt_calc_size(
        struct xfs_mount *mp,
        unsigned long long len)
{
        return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

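/* Calculate the maximum refcount btree size for an AG of the given size. */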
xfs_extlen_t
xfs_refcountbt_max_size(
        struct xfs_mount *mp,
        xfs_agblock_t agblocks)
{
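        /* Bail out if we're uninitialized, which can happen during mount. */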
        if (mp->m_refc_mxr[0] == 0)
                return 0;

        return xfs_refcountbt_calc_size(mp, agblocks);
}

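/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */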
int
xfs_refcountbt_calc_reserves(
        struct xfs_mount *mp,
        struct xfs_trans *tp,
        struct xfs_perag *pag,
        xfs_extlen_t *ask,
        xfs_extlen_t *used)
{
        struct xfs_buf *agbp;
        struct xfs_agf *agf;
        xfs_agblock_t agblocks;
        xfs_extlen_t tree_len;
        int error;

        if (!xfs_has_reflink(mp))
                return 0;

        error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
        if (error)
                return error;

        agf = agbp->b_addr;
        agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_refcount_blocks);
        xfs_trans_brelse(tp, agbp);

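        /*
         * The log is permanently allocated, so the space it occupies will
         * never be available for the kinds of things that would require btree
         * expansion.  We can therefore pretend the space isn't there.
         */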
        if (xfs_ag_contains_log(mp, pag->pag_agno))
                agblocks -= mp->m_sb.sb_logblocks;

        *ask += xfs_refcountbt_max_size(mp, agblocks);
        *used += tree_len;

        return error;
}

int __init
xfs_refcountbt_init_cur_cache(void)
{
        xfs_refcountbt_cur_cache = kmem_cache_create("xfs_refcbt_cur",
                        xfs_btree_cur_sizeof(xfs_refcountbt_maxlevels_ondisk()),
                        0, 0, NULL);

        if (!xfs_refcountbt_cur_cache)
                return -ENOMEM;
        return 0;
}

void
xfs_refcountbt_destroy_cur_cache(void)
{
        kmem_cache_destroy(xfs_refcountbt_cur_cache);
        xfs_refcountbt_cur_cache = NULL;
}