0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_format.h"
0009 #include "xfs_log_format.h"
0010 #include "xfs_shared.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_bit.h"
0013 #include "xfs_mount.h"
0014 #include "xfs_defer.h"
0015 #include "xfs_btree.h"
0016 #include "xfs_rmap.h"
0017 #include "xfs_alloc_btree.h"
0018 #include "xfs_alloc.h"
0019 #include "xfs_extent_busy.h"
0020 #include "xfs_errortag.h"
0021 #include "xfs_error.h"
0022 #include "xfs_trace.h"
0023 #include "xfs_trans.h"
0024 #include "xfs_buf_item.h"
0025 #include "xfs_log.h"
0026 #include "xfs_ag.h"
0027 #include "xfs_ag_resv.h"
0028 #include "xfs_bmap.h"
0029
0030 struct kmem_cache *xfs_extfree_item_cache;
0031
0032 struct workqueue_struct *xfs_alloc_wq;
0033
0034 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
0035
0036 #define XFSA_FIXUP_BNO_OK 1
0037 #define XFSA_FIXUP_CNT_OK 2
0038
0039 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
0040 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
0041 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
0042
0043
0044
0045
0046
0047
0048 unsigned int
0049 xfs_agfl_size(
0050 struct xfs_mount *mp)
0051 {
0052 unsigned int size = mp->m_sb.sb_sectsize;
0053
0054 if (xfs_has_crc(mp))
0055 size -= sizeof(struct xfs_agfl);
0056
0057 return size / sizeof(xfs_agblock_t);
0058 }
0059
/*
 * Block number (within the AG) of the refcount btree root, which sits
 * immediately after the last of the rmap/finobt/inobt roots present.
 */
unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	unsigned int		prev;

	if (xfs_has_rmapbt(mp))
		prev = XFS_RMAP_BLOCK(mp);
	else if (xfs_has_finobt(mp))
		prev = XFS_FIBT_BLOCK(mp);
	else
		prev = XFS_IBT_BLOCK(mp);
	return prev + 1;
}
0070
0071 xfs_extlen_t
0072 xfs_prealloc_blocks(
0073 struct xfs_mount *mp)
0074 {
0075 if (xfs_has_reflink(mp))
0076 return xfs_refc_block(mp) + 1;
0077 if (xfs_has_rmapbt(mp))
0078 return XFS_RMAP_BLOCK(mp) + 1;
0079 if (xfs_has_finobt(mp))
0080 return XFS_FIBT_BLOCK(mp) + 1;
0081 return XFS_IBT_BLOCK(mp) + 1;
0082 }
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097 #define XFS_ALLOCBT_AGFL_RESERVE 4
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117 unsigned int
0118 xfs_alloc_set_aside(
0119 struct xfs_mount *mp)
0120 {
0121 return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4);
0122 }
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138 unsigned int
0139 xfs_alloc_ag_max_usable(
0140 struct xfs_mount *mp)
0141 {
0142 unsigned int blocks;
0143
0144 blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4));
0145 blocks += XFS_ALLOCBT_AGFL_RESERVE;
0146 blocks += 3;
0147 if (xfs_has_finobt(mp))
0148 blocks++;
0149 if (xfs_has_rmapbt(mp))
0150 blocks++;
0151 if (xfs_has_reflink(mp))
0152 blocks++;
0153
0154 return mp->m_sb.sb_agblocks - blocks;
0155 }
0156
0157
0158
0159
0160 STATIC int
0161 xfs_alloc_lookup_eq(
0162 struct xfs_btree_cur *cur,
0163 xfs_agblock_t bno,
0164 xfs_extlen_t len,
0165 int *stat)
0166 {
0167 int error;
0168
0169 cur->bc_rec.a.ar_startblock = bno;
0170 cur->bc_rec.a.ar_blockcount = len;
0171 error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
0172 cur->bc_ag.abt.active = (*stat == 1);
0173 return error;
0174 }
0175
0176
0177
0178
0179
0180 int
0181 xfs_alloc_lookup_ge(
0182 struct xfs_btree_cur *cur,
0183 xfs_agblock_t bno,
0184 xfs_extlen_t len,
0185 int *stat)
0186 {
0187 int error;
0188
0189 cur->bc_rec.a.ar_startblock = bno;
0190 cur->bc_rec.a.ar_blockcount = len;
0191 error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
0192 cur->bc_ag.abt.active = (*stat == 1);
0193 return error;
0194 }
0195
0196
0197
0198
0199
0200 int
0201 xfs_alloc_lookup_le(
0202 struct xfs_btree_cur *cur,
0203 xfs_agblock_t bno,
0204 xfs_extlen_t len,
0205 int *stat)
0206 {
0207 int error;
0208 cur->bc_rec.a.ar_startblock = bno;
0209 cur->bc_rec.a.ar_blockcount = len;
0210 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
0211 cur->bc_ag.abt.active = (*stat == 1);
0212 return error;
0213 }
0214
0215 static inline bool
0216 xfs_alloc_cur_active(
0217 struct xfs_btree_cur *cur)
0218 {
0219 return cur && cur->bc_ag.abt.active;
0220 }
0221
0222
0223
0224
0225
0226
0227 STATIC int
0228 xfs_alloc_update(
0229 struct xfs_btree_cur *cur,
0230 xfs_agblock_t bno,
0231 xfs_extlen_t len)
0232 {
0233 union xfs_btree_rec rec;
0234
0235 rec.alloc.ar_startblock = cpu_to_be32(bno);
0236 rec.alloc.ar_blockcount = cpu_to_be32(len);
0237 return xfs_btree_update(cur, &rec);
0238 }
0239
0240
0241
0242
/*
 * Get the data from the pointed-to record and sanity-check it before
 * handing it back.  Returns -EFSCORRUPTED (after logging a warning)
 * for records that are zero length or that fall outside the AG.
 */
int
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_perag	*pag = cur->bc_ag.pag;
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);

	/* Zero-length extents must never appear in the free space btrees. */
	if (*len == 0)
		goto out_bad_rec;

	/* Check the extent range, including unsigned overflow of bno + len. */
	if (!xfs_verify_agbno(pag, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(pag, *bno + *len - 1))
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size",
		pag->pag_agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
}
0284
0285
0286
0287
0288
/*
 * Compute the aligned, busy-trimmed version of the found extent.
 * Takes the alignment and minimum length requirements into account.
 * Returns true if any part of the extent overlapped a busy extent.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)	/* busy generation for the extent */
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of the found extent. */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If the extent starts before min_agbno but extends past it, shift
	 * the start up to min_agbno (only when something would remain).
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		/* Length left after rounding the start up; 0 if none. */
		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
0332
0333
0334
0335
0336
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 * Returns the absolute distance of the chosen start from wantbno,
 * and sets *newbnop to the chosen block (NULLAGBLOCK if none fits).
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = datatype & XFS_ALLOC_USERDATA;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before desired block. The second case is there to allow
	 * for contiguous allocation from the remaining free space if the file
	 * grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		/*
		 * Free extent straddles the target: try the aligned block at
		 * or after wantbno (newbno1) and the one just before it
		 * (newbno2); pick whichever gives the longer usable length,
		 * breaking ties by distance from wantbno.
		 */
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		/* Unaligned straddle: take exactly the wanted block. */
		newbno1 = wantbno;
	} else if (alignment > 1) {
		/* Free extent ends before wantend: take the aligned tail. */
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
0401
0402
0403
0404
0405
0406
0407
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;		/* current remainder mod prod */
	xfs_extlen_t	rlen;		/* candidate (rounded) length */

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	/* Round rlen down to the nearest value congruent to mod (mod prod). */
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* Casting to int is fine: minlen is a small positive value. */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
0438
0439
0440
0441
0442
0443
0444
0445
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	struct xfs_btree_cur *cnt_cur,	/* cursor for by-size btree */
	struct xfs_btree_cur *bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}

#ifdef DEBUG
	/*
	 * When both trees consist of a single (root) block, their leaf
	 * record counts must agree.
	 */
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp);

		if (XFS_IS_CORRUPT(mp,
				   bnoblock->bb_numrecs !=
				   cntblock->bb_numrecs))
			return -EFSCORRUPTED;
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;
	/*
	 * Add new by-size btree entry(s).  Each lookup must find no
	 * existing record (i == 0) before the insert.
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	return 0;
}
0604
/*
 * Structural verifier for the AGFL.  Returns NULL if the buffer is fine
 * or the address of the failing check otherwise.
 */
static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
	int		i;

	/*
	 * No verification for non-CRC (pre-v5) AGFLs: they carry no magic,
	 * UUID or LSN, and only the portion the AGF declares active is
	 * meaningful, which we cannot see from here.
	 */
	if (!xfs_has_crc(mp))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * Only check the AG sequence number when a perag is attached to the
	 * buffer; b_pag may legitimately be NULL (presumably for uncached
	 * buffers, e.g. during growfs — TODO confirm against callers).
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	/* Every slot is either NULLAGBLOCK or a block inside the AG. */
	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}
0646
0647 static void
0648 xfs_agfl_read_verify(
0649 struct xfs_buf *bp)
0650 {
0651 struct xfs_mount *mp = bp->b_mount;
0652 xfs_failaddr_t fa;
0653
0654
0655
0656
0657
0658
0659
0660 if (!xfs_has_crc(mp))
0661 return;
0662
0663 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
0664 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
0665 else {
0666 fa = xfs_agfl_verify(bp);
0667 if (fa)
0668 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
0669 }
0670 }
0671
/*
 * Write verifier: validate the structure, stamp the log LSN into the
 * AGFL header, then recompute the CRC.  Ordering matters: the LSN must
 * be written before the checksum is calculated over it.
 */
static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* No verification or CRC on pre-v5 filesystems. */
	if (!xfs_has_crc(mp))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	/* Record the last-modification LSN if the buffer is logged. */
	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}
0695
/* Buffer operations vector for AGFL buffers. */
const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};
0703
0704
0705
0706
0707 int
0708 xfs_alloc_read_agfl(
0709 struct xfs_perag *pag,
0710 struct xfs_trans *tp,
0711 struct xfs_buf **bpp)
0712 {
0713 struct xfs_mount *mp = pag->pag_mount;
0714 struct xfs_buf *bp;
0715 int error;
0716
0717 error = xfs_trans_read_buf(
0718 mp, tp, mp->m_ddev_targp,
0719 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGFL_DADDR(mp)),
0720 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
0721 if (error)
0722 return error;
0723 xfs_buf_set_ref(bp, XFS_AGFL_REF);
0724 *bpp = bp;
0725 return 0;
0726 }
0727
/*
 * Update the per-AG free block counts (in-memory perag and on-disk AGF)
 * by len blocks; len is positive for a free, negative for an allocation.
 * Sanity-check that the result does not exceed the AG size.
 */
STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = agbp->b_addr;

	agbp->b_pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	/* More free blocks than blocks in the AG means corruption. */
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length))) {
		xfs_buf_mark_corrupt(agbp);
		return -EFSCORRUPTED;
	}

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
0748
0749
0750
0751
/*
 * Block allocation algorithm and data structures.
 * Tracks the three btree cursors and the best allocation candidate
 * found so far during a near-mode search.
 */
struct xfs_alloc_cur {
	struct xfs_btree_cur	*cnt;		/* btree cursors */
	struct xfs_btree_cur	*bnolt;
	struct xfs_btree_cur	*bnogt;
	xfs_extlen_t		cur_len;	/* current search length */
	xfs_agblock_t		rec_bno;	/* extent startblock */
	xfs_extlen_t		rec_len;	/* extent length */
	xfs_agblock_t		bno;		/* alloc bno */
	xfs_extlen_t		len;		/* alloc len */
	xfs_extlen_t		diff;		/* diff from search bno */
	unsigned int		busy_gen;	/* busy state */
	bool			busy;
};
0765
0766
0767
0768
0769
0770
/*
 * Set up cursors, etc. in the extent allocation cursor. This function can be
 * called multiple times to reset an initialized structure without having to
 * reallocate cursors.  Returns -ENOSPC if no maxlen-sized extent exists.
 */
static int
xfs_alloc_cur_setup(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	int			error;
	int			i;

	ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);

	acur->cur_len = args->maxlen;
	acur->rec_bno = 0;
	acur->rec_len = 0;
	acur->bno = 0;
	acur->len = 0;
	acur->diff = -1;
	acur->busy = false;
	acur->busy_gen = 0;

	/*
	 * Perform an initial cntbt lookup to check for availability of maxlen
	 * extents. If this fails, we'll return -ENOSPC to signal the caller to
	 * attempt a small allocation.
	 */
	if (!acur->cnt)
		acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_CNT);
	error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
	if (error)
		return error;

	/*
	 * Allocate the bnobt left and right search cursors.
	 */
	if (!acur->bnolt)
		acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	if (!acur->bnogt)
		acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
					args->agbp, args->pag, XFS_BTNUM_BNO);
	return i == 1 ? 0 : -ENOSPC;
}
0813
0814 static void
0815 xfs_alloc_cur_close(
0816 struct xfs_alloc_cur *acur,
0817 bool error)
0818 {
0819 int cur_error = XFS_BTREE_NOERROR;
0820
0821 if (error)
0822 cur_error = XFS_BTREE_ERROR;
0823
0824 if (acur->cnt)
0825 xfs_btree_del_cursor(acur->cnt, cur_error);
0826 if (acur->bnolt)
0827 xfs_btree_del_cursor(acur->bnolt, cur_error);
0828 if (acur->bnogt)
0829 xfs_btree_del_cursor(acur->bnogt, cur_error);
0830 acur->cnt = acur->bnolt = acur->bnogt = NULL;
0831 }
0832
0833
0834
0835
0836
0837
0838
/*
 * Check an extent for allocation and track the best available candidate in the
 * allocation structure. The cursor is deactivated if it has entered an out of
 * range state based on allocation arguments. Optimization such as a new
 * candidate extent is set in *new, which is only meaningful on success.
 */
static int
xfs_alloc_cur_check(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	int			*new)
{
	int			error, i;
	xfs_agblock_t		bno, bnoa, bnew;
	xfs_extlen_t		len, lena, diff = -1;
	bool			busy;
	unsigned		busy_gen = 0;
	bool			deactivate = false;
	bool			isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;

	*new = 0;

	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(args->mp, i != 1))
		return -EFSCORRUPTED;

	/*
	 * Check minlen and deactivate a cntbt cursor if out of acceptable size
	 * range (i.e., walking backwards looking for a minlen extent).
	 */
	if (len < args->minlen) {
		deactivate = !isbnobt;
		goto out;
	}

	busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
					 &busy_gen);
	acur->busy |= busy;
	if (busy)
		acur->busy_gen = busy_gen;
	/* deactivate a bnobt cursor outside of locality range */
	if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
		deactivate = isbnobt;
		goto out;
	}
	if (lena < args->minlen)
		goto out;

	args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
	xfs_alloc_fix_len(args);
	ASSERT(args->len >= args->minlen);
	if (args->len < acur->len)
		goto out;

	/*
	 * We have an aligned record that satisfies minlen and beats or matches
	 * the candidate extent size. Compare locality for near allocation mode.
	 */
	ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
	diff = xfs_alloc_compute_diff(args->agbno, args->len,
				      args->alignment, args->datatype,
				      bnoa, lena, &bnew);
	if (bnew == NULLAGBLOCK)
		goto out;

	/*
	 * Deactivate a bnobt cursor with worse locality than the current best.
	 */
	if (diff > acur->diff) {
		deactivate = isbnobt;
		goto out;
	}

	ASSERT(args->len > acur->len ||
	       (args->len == acur->len && diff <= acur->diff));
	acur->rec_bno = bno;
	acur->rec_len = len;
	acur->bno = bnew;
	acur->len = args->len;
	acur->diff = diff;
	*new = 1;

	/*
	 * We're done if we found a perfect allocation. This only deactivates
	 * the current cursor, but this is just an optimization to terminate a
	 * cntbt search that otherwise runs to the edge of the tree.
	 */
	if (acur->diff == 0 && acur->len == args->maxlen)
		deactivate = true;
out:
	if (deactivate)
		cur->bc_ag.abt.active = false;
	trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
				  *new);
	return 0;
}
0932
0933
0934
0935
0936
/*
 * Complete an allocation of a candidate extent. Remove the extent from both
 * trees and update the args structure.
 */
STATIC int
xfs_alloc_cur_finish(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur)
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	int			error;

	ASSERT(acur->cnt && acur->bnolt);
	ASSERT(acur->bno >= acur->rec_bno);
	ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
	ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));

	error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
				      acur->rec_len, acur->bno, acur->len, 0);
	if (error)
		return error;

	args->agbno = acur->bno;
	args->len = acur->len;
	args->wasfromfl = 0;

	trace_xfs_alloc_cur(args);
	return 0;
}
0962
0963
0964
0965
0966
/*
 * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
 * bno optimized lookup to search for extents with ideal size and locality.
 */
STATIC int
xfs_alloc_cntbt_iter(
	struct xfs_alloc_arg		*args,
	struct xfs_alloc_cur		*acur)
{
	struct xfs_btree_cur	*cur = acur->cnt;
	xfs_agblock_t		bno;
	xfs_extlen_t		len, cur_len;
	int			error;
	int			i;

	if (!xfs_alloc_cur_active(cur))
		return 0;

	/* locality optimized lookup */
	cur_len = acur->cur_len;
	error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
	if (error)
		return error;
	if (i == 0)
		return 0;
	error = xfs_alloc_get_rec(cur, &bno, &len, &i);
	if (error)
		return error;

	/* check the current record and update search length from it */
	error = xfs_alloc_cur_check(args, acur, cur, &i);
	if (error)
		return error;
	ASSERT(len >= acur->cur_len);
	acur->cur_len = len;

	/*
	 * We looked up the first record >= [agbno, len] above. The agbno is a
	 * secondary key and so the current record may lie just before or after
	 * agbno. If it is past agbno, check the previous record too so long as
	 * the length matches as it may be closer. Don't check a smaller record
	 * because that could deactivate our cursor.
	 */
	if (bno > args->agbno) {
		error = xfs_btree_decrement(cur, 0, &i);
		if (!error && i) {
			error = xfs_alloc_get_rec(cur, &bno, &len, &i);
			if (!error && i && len == acur->cur_len)
				error = xfs_alloc_cur_check(args, acur, cur,
							    &i);
		}
		if (error)
			return error;
	}

	/*
	 * Increment the search key until we find at least one allocation
	 * candidate or if the extent we found was larger. Double the search
	 * key if we're still running this loop without an allocation
	 * candidate.
	 */
	cur_len <<= 1;
	if (!acur->len || acur->cur_len >= cur_len)
		acur->cur_len++;
	else
		acur->cur_len = cur_len;

	return error;
}
1032
1033
1034
1035
1036
1037
/*
 * Deal with the case where only small freespaces remain. Either return the
 * contents of the last freespace record, or allocate space from the freelist
 * if there is nothing in the tree.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_small(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
	xfs_agblock_t		*fbnop,	/* result block number */
	xfs_extlen_t		*flenp,	/* result length */
	int			*stat)	/* status: 0-freelist, 1-normal/none */
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	int			error = 0;
	xfs_agblock_t		fbno = NULLAGBLOCK;
	xfs_extlen_t		flen = 0;
	int			i = 0;

	/*
	 * If a cntbt cursor is provided, try to allocate the largest record in
	 * the tree. Try the AGFL if the cntbt is empty, unless the minimum
	 * length extent doesn't require alignment.
	 */
	if (ccur)
		error = xfs_btree_decrement(ccur, 0, &i);
	if (error)
		goto error;
	if (i) {
		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
		if (error)
			goto error;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error;
		}
		goto out;
	}

	if (args->minlen != 1 || args->alignment != 1 ||
	    args->resv == XFS_AG_RESV_AGFL ||
	    be32_to_cpu(agf->agf_flcount) <= args->minleft)
		goto out;

	error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
			&fbno, 0);
	if (error)
		goto error;
	if (fbno == NULLAGBLOCK)
		goto out;

	xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
			      (args->datatype & XFS_ALLOC_NOBUSY));

	if (args->datatype & XFS_ALLOC_USERDATA) {
		struct xfs_buf	*bp;

		/* Invalidate any stale metadata in the block we hand out. */
		error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
				args->mp->m_bsize, 0, &bp);
		if (error)
			goto error;
		xfs_trans_binval(args->tp, bp);
	}
	*fbnop = args->agbno = fbno;
	*flenp = args->len = 1;
	if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error;
	}
	args->wasfromfl = 1;
	trace_xfs_alloc_small_freelist(args);

	/*
	 * If we're feeding an AGFL block to something that doesn't live in the
	 * free space, we need to clear out the OWN_AG rmap.
	 */
	error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
			      &XFS_RMAP_OINFO_AG);
	if (error)
		goto error;

	*stat = 0;
	return 0;

out:
	/*
	 * Can't do the allocation, give up.
	 */
	if (flen < args->minlen) {
		args->agbno = NULLAGBLOCK;
		trace_xfs_alloc_small_notenough(args);
		flen = 0;
	}
	*fbnop = fbno;
	*flenp = flen;
	*stat = 1;
	trace_xfs_alloc_small_done(args);
	return 0;

error:
	trace_xfs_alloc_small_error(args);
	return error;
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);

	/*
	 * Branch to correct routine based on the type.
	 */
	args->wasfromfl = 0;
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}

	if (error || args->agbno == NULLAGBLOCK)
		return error;

	ASSERT(args->len >= args->minlen);
	ASSERT(args->len <= args->maxlen);
	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
	ASSERT(args->agbno % args->alignment == 0);

	/* if not file data, insert new block into the reverse map btree */
	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
		error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
				       args->agbno, args->len, &args->oinfo);
		if (error)
			return error;
	}

	if (!args->wasfromfl) {
		error = xfs_alloc_update_counters(args->tp, args->agbp,
						  -((long)(args->len)));
		if (error)
			return error;

		ASSERT(!xfs_extent_busy_search(args->mp, args->pag,
					      args->agbno, args->len));
	}

	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);

	XFS_STATS_INC(args->mp, xs_allocx);
	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
	return error;
}
1210
1211
1212
1213
1214
1215
1216
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
	struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */
	struct xfs_btree_cur *cnt_cur;/* by count btree cursor */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	xfs_agblock_t	tbno;	/* start block of busy trimmed extent */
	xfs_extlen_t	tlen;	/* length of busy trimmed extent */
	xfs_agblock_t	tend;	/* end block of busy trimmed extent */
	int		i;	/* success/failure of operation */
	unsigned	busy_gen;

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->pag, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	if (XFS_IS_CORRUPT(args->mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(fbno <= args->agbno);

	/*
	 * Check for overlapping busy extents.
	 */
	tbno = fbno;
	tlen = flen;
	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);

	/*
	 * Give up if the start of the extent is busy, or the freespace isn't
	 * long enough for the allocation.
	 */
	if (tbno > args->agbno)
		goto not_found;
	if (tlen < args->minlen)
		goto not_found;
	tend = tbno + tlen;
	if (tend < args->agbno + args->minlen)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
						- args->agbno;
	xfs_alloc_fix_len(args);
	ASSERT(args->agbno + args->len <= tend);

	/*
	 * We are allocating agbno for args->len
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
1327
1328
1329
1330
1331
/*
 * Walk the allocation btree in the given direction, checking each record
 * against the current best candidate.  Stops after 'count' records
 * (count < 0 means unlimited), or after the first candidate if find_one.
 * *stat is set to 1 if at least one candidate was found.
 */
STATIC int
xfs_alloc_walk_iter(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	struct xfs_btree_cur	*cur,
	bool			increment,	/* walk direction */
	bool			find_one,	/* stop on first candidate */
	int			count,		/* rec count (-1 = infinite) */
	int			*stat)
{
	int			error;
	int			i;

	*stat = 0;

	/*
	 * Search so long as the cursor is active or we find a better extent.
	 * The cursor is deactivated if it extends beyond the range of the
	 * current allocation candidate.
	 */
	while (xfs_alloc_cur_active(cur) && count) {
		error = xfs_alloc_cur_check(args, acur, cur, &i);
		if (error)
			return error;
		if (i == 1) {
			*stat = 1;
			if (find_one)
				break;
		}
		if (!xfs_alloc_cur_active(cur))
			break;

		if (increment)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			return error;
		if (i == 0)
			cur->bc_ag.abt.active = false;

		if (count > 0)
			count--;
	}

	return 0;
}
1379
1380
1381
1382
1383
/*
 * Search the by-bno and by-size btrees in parallel for an extent with ideal
 * locality based on the current allocation hint (args->agbno).
 */
STATIC int
xfs_alloc_ag_vextent_locality(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	int			*stat)
{
	struct xfs_btree_cur	*fbcur = NULL;
	int			error;
	int			i;
	bool			fbinc;

	ASSERT(acur->len == 0);
	ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);

	*stat = 0;

	/* Position all three cursors around the locality hint. */
	error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
	if (error)
		return error;
	error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
	if (error)
		return error;

	/*
	 * Search the bnobt and cntbt in parallel. Search the bnobt left and
	 * right and lookup the closest extent to the locality hint for each
	 * extent size key in the cntbt. The entire search terminates
	 * immediately on a bnobt hit because that means we've found best case
	 * locality. Otherwise the search continues until the cntbt cursor runs
	 * off the end of the tree. If no allocation candidate is found at this
	 * point, give up on locality, walk backwards from the end of the cntbt
	 * and take the first available extent.
	 *
	 * The parallel tree searches balance each other out to provide fairly
	 * consistent performance for various situations. The bnobt search can
	 * have pathological behavior in the worst case scenario of larger
	 * allocation requests and fragmented free space. On the other hand,
	 * the bnobt is able to satisfy most smaller allocation requests much
	 * more quickly, which reduces bnobt searches and frees up the cntbt as
	 * the allocation request size increases. Thus, for larger requests the
	 * locality algorithm essentially reduces to a cntbt nearest key
	 * search.
	 */
	while (xfs_alloc_cur_active(acur->bnolt) ||
	       xfs_alloc_cur_active(acur->bnogt) ||
	       xfs_alloc_cur_active(acur->cnt)) {

		trace_xfs_alloc_cur_lookup(args);

		/*
		 * Search the bnobt left and right. In the case of a hit, finish
		 * the search in the opposite direction and we're done.
		 */
		error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
					    true, 1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_left(args);
			fbcur = acur->bnogt;
			fbinc = true;
			break;
		}
		error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
					    1, &i);
		if (error)
			return error;
		if (i == 1) {
			trace_xfs_alloc_cur_right(args);
			fbcur = acur->bnolt;
			fbinc = false;
			break;
		}

		/*
		 * Check the extent with best locality based on the current
		 * extent size search key and keep track of the best candidate.
		 */
		error = xfs_alloc_cntbt_iter(args, acur);
		if (error)
			return error;
		if (!xfs_alloc_cur_active(acur->cnt)) {
			trace_xfs_alloc_cur_lookup_done(args);
			break;
		}
	}

	/*
	 * If we failed to find anything due to busy extents, return empty
	 * handed so the caller can flush and retry. If no busy extents were
	 * found, walk backwards from the end of the cntbt as a last resort.
	 */
	if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
		error = xfs_btree_decrement(acur->cnt, 0, &i);
		if (error)
			return error;
		if (i) {
			acur->cnt->bc_ag.abt.active = true;
			fbcur = acur->cnt;
			fbinc = false;
		}
	}

	/*
	 * Search in the opposite direction for a better entry in the case of
	 * a bnobt hit or walk backwards from the end of the cntbt.
	 */
	if (fbcur) {
		error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
					    &i);
		if (error)
			return error;
	}

	if (acur->len)
		*stat = 1;

	return 0;
}
1508
1509
/*
 * Attempt an allocation from the last block of the cnt btree: since the
 * largest extents live there, scan it for the best-locality candidate
 * that satisfies the allocation.  Sets *allocated on success; leaving it
 * unset tells the caller to fall back to the full locality search.
 */
static int
xfs_alloc_ag_vextent_lastblock(
	struct xfs_alloc_arg	*args,
	struct xfs_alloc_cur	*acur,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	bool			*allocated)
{
	int			error;
	int			i;

#ifdef DEBUG
	/* Randomly don't execute the first algorithm. */
	if (prandom_u32() & 1)
		return 0;
#endif

	/*
	 * Start from the entry that lookup found, sequence through all larger
	 * free blocks.  If we're actually pointing at a record smaller than
	 * maxlen, go to the start of this block, and skip all those smaller
	 * than minlen.
	 */
	if (*len || args->alignment > 1) {
		acur->cnt->bc_levels[0].ptr = 1;
		do {
			error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
			if (error)
				return error;
			if (XFS_IS_CORRUPT(args->mp, i != 1))
				return -EFSCORRUPTED;
			if (*len >= args->minlen)
				break;
			error = xfs_btree_increment(acur->cnt, 0, &i);
			if (error)
				return error;
		} while (i);
		ASSERT(*len >= args->minlen);
		if (!i)
			return 0;
	}

	error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
	if (error)
		return error;

	/*
	 * It didn't work. We COULD be in a case where there are good records
	 * somewhere, so try again.
	 */
	if (acur->len == 0)
		return 0;

	trace_xfs_alloc_near_first(args);
	*allocated = true;
	return 0;
}
1567
1568
1569
1570
1571
1572
1573
/*
 * Allocate a variable extent near args->agbno in the allocation group.
 * The extent's length is between args->minlen and args->maxlen; locality
 * to args->agbno is the secondary optimization target.  On success,
 * args->agbno/args->len describe the allocation; args->agbno is set to
 * NULLAGBLOCK when nothing suitable exists.
 */
STATIC int
xfs_alloc_ag_vextent_near(
	struct xfs_alloc_arg	*args)
{
	struct xfs_alloc_cur	acur = {};
	int			error;		/* error code */
	int			i;		/* result code, temporary */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;

	/* handle uninitialized agbno range so caller doesn't have to */
	if (!args->min_agbno && !args->max_agbno)
		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
	ASSERT(args->min_agbno <= args->max_agbno);

	/* clamp agbno to the range appropriate to this allocation */
	if (args->agbno < args->min_agbno)
		args->agbno = args->min_agbno;
	if (args->agbno > args->max_agbno)
		args->agbno = args->max_agbno;

restart:
	len = 0;

	/*
	 * Set up cursors and see if there are any free extents as big as
	 * maxlen.  If not, pick the last entry in the tree unless the tree
	 * is empty.
	 */
	error = xfs_alloc_cur_setup(args, &acur);
	if (error == -ENOSPC) {
		/* nothing >= maxlen; fall back to a small allocation */
		error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
				&len, &i);
		if (error)
			goto out;
		if (i == 0 || len == 0) {
			trace_xfs_alloc_near_noentry(args);
			goto out;
		}
		ASSERT(i == 1);
	} else if (error) {
		goto out;
	}

	/*
	 * First algorithm.  If the requested extent is large wrt the
	 * freespaces available in this AG, then the cursor will be pointing
	 * to a btree entry near the right edge of the tree.  If it's in the
	 * last btree leaf block, then we just examine all the entries in
	 * that block that are big enough and pick the best one.
	 */
	if (xfs_btree_islastblock(acur.cnt, 0)) {
		bool		allocated = false;

		error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
				&allocated);
		if (error)
			goto out;
		if (allocated)
			goto alloc_finish;
	}

	/*
	 * Second algorithm: search in the by-bno tree to the left and to the
	 * right simultaneously, until in each case we find a space big enough.
	 */
	error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
	if (error)
		goto out;

	/*
	 * If we couldn't get anything, try again after flushing busy extents,
	 * since busy extents may have masked usable space.
	 */
	if (!acur.len) {
		if (acur.busy) {
			trace_xfs_alloc_near_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag,
					      acur.busy_gen);
			goto restart;
		}
		trace_xfs_alloc_size_neither(args);
		args->agbno = NULLAGBLOCK;
		goto out;
	}

alloc_finish:
	/* fix up btrees on a successful allocation */
	error = xfs_alloc_cur_finish(args, &acur);

out:
	xfs_alloc_cur_close(&acur, error);
	return error;
}
1668
1669
1670
1671
1672
1673
1674
/*
 * Allocate a variable extent anywhere in the allocation group agno.
 * Extent's length (returned in args->len) will be between minlen and maxlen.
 * No locality constraint: the by-size (cnt) btree is used to find the
 * largest aligned candidate.  args->agbno is NULLAGBLOCK on no-space.
 */
STATIC int
xfs_alloc_ag_vextent_size(
	xfs_alloc_arg_t		*args)
{
	struct xfs_agf		*agf = args->agbp->b_addr;
	struct xfs_btree_cur	*bno_cur;	/* by-bno cursor, for fixup */
	struct xfs_btree_cur	*cnt_cur;	/* by-size cursor */
	int			error;
	xfs_agblock_t		fbno;		/* start of found freespace */
	xfs_extlen_t		flen;		/* length of found freespace */
	int			i;		/* temp status variable */
	xfs_agblock_t		rbno;		/* returned (aligned) block */
	xfs_extlen_t		rlen;		/* returned (aligned) length */
	bool			busy;
	unsigned		busy_gen;

restart:
	/*
	 * Allocate and initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_CNT);
	bno_cur = NULL;

	/*
	 * Look for an entry >= maxlen+alignment-1 blocks.
	 */
	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
			args->maxlen + args->alignment - 1, &i)))
		goto error0;

	/*
	 * If none then we have to settle for a smaller extent.  In the case
	 * that the AG is entirely busy, xfs_alloc_ag_vextent_small may return
	 * an AGFL block; if it fails to find anything, done.
	 */
	if (!i) {
		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
						   &fbno, &flen, &i);
		if (error)
			goto error0;
		if (i == 0 || flen == 0) {
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_noentry(args);
			return 0;
		}
		ASSERT(i == 1);
		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
				&rlen, &busy_gen);
	} else {
		/*
		 * Search for a non-busy extent that is large enough.
		 */
		for (;;) {
			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
			if (error)
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}

			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);

			if (rlen >= args->maxlen)
				break;

			error = xfs_btree_increment(cnt_cur, 0, &i);
			if (error)
				goto error0;
			if (i == 0) {
				/*
				 * Our only valid extents must have been busy.
				 * Flush and retry the whole allocation; we
				 * hold no cursors at this point.
				 */
				xfs_btree_del_cursor(cnt_cur,
						     XFS_BTREE_NOERROR);
				trace_xfs_alloc_size_busy(args);
				xfs_extent_busy_flush(args->mp,
							args->pag, busy_gen);
				goto restart;
			}
		}
	}

	/*
	 * In the first case above, we got the last entry in the
	 * by-size btree.  Now we check to see if the space hits maxlen
	 * once aligned; if not, we search left for something better.
	 * This can't happen in the second case above.
	 */
	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
	if (XFS_IS_CORRUPT(args->mp,
			   rlen != 0 &&
			   (rlen > flen ||
			    rbno + rlen > fbno + flen))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	if (rlen < args->maxlen) {
		xfs_agblock_t	bestfbno;	/* best found start */
		xfs_extlen_t	bestflen;	/* best found length */
		xfs_agblock_t	bestrbno;	/* best aligned start */
		xfs_extlen_t	bestrlen;	/* best aligned length */

		bestrlen = rlen;
		bestrbno = rbno;
		bestflen = flen;
		bestfbno = fbno;
		/* walk left through smaller records looking for a better fit */
		for (;;) {
			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
				goto error0;
			if (i == 0)
				break;
			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
					&i)))
				goto error0;
			if (XFS_IS_CORRUPT(args->mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			/* records are length-sorted; can't improve past this */
			if (flen < bestrlen)
				break;
			busy = xfs_alloc_compute_aligned(args, fbno, flen,
					&rbno, &rlen, &busy_gen);
			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
			if (XFS_IS_CORRUPT(args->mp,
					   rlen != 0 &&
					   (rlen > flen ||
					    rbno + rlen > fbno + flen))) {
				error = -EFSCORRUPTED;
				goto error0;
			}
			if (rlen > bestrlen) {
				bestrlen = rlen;
				bestrbno = rbno;
				bestflen = flen;
				bestfbno = fbno;
				if (rlen == args->maxlen)
					break;
			}
		}
		/* reposition the cursor on the winning record */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
				&i)))
			goto error0;
		if (XFS_IS_CORRUPT(args->mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		rlen = bestrlen;
		rbno = bestrbno;
		flen = bestflen;
		fbno = bestfbno;
	}
	args->wasfromfl = 0;
	/*
	 * Fix up the length.
	 */
	args->len = rlen;
	if (rlen < args->minlen) {
		if (busy) {
			/* may only be too short due to busy trimming; retry */
			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
			trace_xfs_alloc_size_busy(args);
			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
			goto restart;
		}
		goto out_nominleft;
	}
	xfs_alloc_fix_len(args);

	rlen = args->len;
	if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	/*
	 * Allocate and initialize a cursor for the by-block tree, then
	 * update both btrees to reflect the allocation.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					args->pag, XFS_BTNUM_BNO);
	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
			rbno, rlen, XFSA_FIXUP_CNT_OK)))
		goto error0;
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	cnt_cur = bno_cur = NULL;
	args->len = rlen;
	args->agbno = rbno;
	/* the allocation must not run off the end of the AG */
	if (XFS_IS_CORRUPT(args->mp,
			   args->agbno + args->len >
			   be32_to_cpu(agf->agf_length))) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	trace_xfs_alloc_size_done(args);
	return 0;

error0:
	trace_xfs_alloc_size_error(args);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	return error;

out_nominleft:
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	trace_xfs_alloc_size_nominleft(args);
	args->agbno = NULLAGBLOCK;
	return 0;
}
1890
1891
1892
1893
/*
 * Free the extent starting at agno/bno for length len, coalescing it with
 * physically adjacent free extents on either side.  Updates the rmap btree
 * (unless @oinfo says to skip it), both free space btrees, the AGF counters
 * and the AG reservation accounting.
 */
STATIC int
xfs_free_ag_extent(
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	xfs_agnumber_t			agno,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xfs_mount		*mp;
	struct xfs_btree_cur		*bno_cur;	/* by-bno btree cursor */
	struct xfs_btree_cur		*cnt_cur;	/* by-size btree cursor */
	xfs_agblock_t			gtbno;	/* start of right neighbor */
	xfs_extlen_t			gtlen;	/* length of right neighbor */
	xfs_agblock_t			ltbno;	/* start of left neighbor */
	xfs_extlen_t			ltlen;	/* length of left neighbor */
	xfs_agblock_t			nbno;	/* new freespace start */
	xfs_extlen_t			nlen;	/* new freespace length */
	int				haveleft; /* have a left neighbor */
	int				haveright; /* have a right neighbor */
	int				i;
	int				error;
	struct xfs_perag		*pag = agbp->b_pag;

	bno_cur = cnt_cur = NULL;
	mp = tp->t_mountp;

	/* remove the reverse-mapping record first */
	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
		error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo);
		if (error)
			goto error0;
	}

	/*
	 * Allocate and initialize a cursor for the by-block btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_BNO);

	/*
	 * Look for a neighboring block on the left (lower block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
		goto error0;
	if (haveleft) {
		/*
		 * There is a block to our left.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * It's not contiguous, though.
		 */
		if (ltbno + ltlen < bno)
			haveleft = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 */
			if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
	}
	/*
	 * Look for a neighboring block on the right (higher block numbers)
	 * that is contiguous with this space.
	 */
	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
		goto error0;
	if (haveright) {
		/*
		 * There is a block to our right.
		 */
		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * It's not contiguous, though.
		 */
		if (bno + len < gtbno)
			haveright = 0;
		else {
			/*
			 * If this failure happens the request to free this
			 * space was invalid, it's (partly) already free.
			 */
			if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
	}
	/*
	 * Now allocate and initialize a cursor for the by-size tree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, pag, XFS_BTNUM_CNT);
	/*
	 * Have both left and right contiguous neighbors.
	 * Merge all three into a single free block.
	 */
	if (haveleft && haveright) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Delete the old by-block entry for the right block.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Move the by-block cursor back to the left neighbor.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
#ifdef DEBUG
		/*
		 * Check that this is the right record: delete didn't
		 * mangle the cursor.
		 */
		{
			xfs_agblock_t	xxbno;
			xfs_extlen_t	xxlen;

			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
					&i)))
				goto error0;
			if (XFS_IS_CORRUPT(mp,
					   i != 1 ||
					   xxbno != ltbno ||
					   xxlen != ltlen)) {
				error = -EFSCORRUPTED;
				goto error0;
			}
		}
#endif
		/*
		 * Update remaining by-block entry to the new, joined block.
		 */
		nbno = ltbno;
		nlen = len + ltlen + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a left contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveleft) {
		/*
		 * Delete the old by-size entry on the left.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Back up the by-block cursor to the left neighbor, and
		 * update its length.
		 */
		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		nbno = ltbno;
		nlen = len + ltlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * Have only a right contiguous neighbor.
	 * Merge it together with the new freespace.
	 */
	else if (haveright) {
		/*
		 * Delete the old by-size entry on the right.
		 */
		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_delete(cnt_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		/*
		 * Update the starting block and length of the right
		 * neighbor in the by-block tree.
		 */
		nbno = bno;
		nlen = len + gtlen;
		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
			goto error0;
	}
	/*
	 * No contiguous neighbors.
	 * Insert the new freespace into the by-block tree.
	 */
	else {
		nbno = bno;
		nlen = len;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			goto error0;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
	}
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	bno_cur = NULL;
	/*
	 * In all cases we need to insert the new freespace in the by-size
	 * tree.  The lookup must fail (i == 0) since the record is new.
	 */
	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 0)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	if ((error = xfs_btree_insert(cnt_cur, &i)))
		goto error0;
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
	cnt_cur = NULL;

	/*
	 * Update the freespace totals in the AGF and superblock, and the
	 * per-AG reservation accounting.
	 */
	error = xfs_alloc_update_counters(tp, agbp, len);
	xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
	if (error)
		goto error0;

	XFS_STATS_INC(mp, xs_freex);
	XFS_STATS_ADD(mp, xs_freeb, len);

	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);

	return 0;

error0:
	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
	if (bno_cur)
		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	if (cnt_cur)
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
	return error;
}
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214 void
2215 xfs_alloc_compute_maxlevels(
2216 xfs_mount_t *mp)
2217 {
2218 mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
2219 (mp->m_sb.sb_agblocks + 1) / 2);
2220 ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk());
2221 }
2222
2223
2224
2225
2226
2227
2228
2229 xfs_extlen_t
2230 xfs_alloc_longest_free_extent(
2231 struct xfs_perag *pag,
2232 xfs_extlen_t need,
2233 xfs_extlen_t reserved)
2234 {
2235 xfs_extlen_t delta = 0;
2236
2237
2238
2239
2240
2241 if (need > pag->pagf_flcount)
2242 delta = need - pag->pagf_flcount;
2243
2244
2245
2246
2247
2248
2249 if (pag->pagf_freeblks - pag->pagf_longest < reserved)
2250 delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
2251
2252
2253
2254
2255
2256 if (pag->pagf_longest > delta)
2257 return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
2258 pag->pagf_longest - delta);
2259
2260
2261 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2262 }
2263
2264
2265
2266
2267
2268 unsigned int
2269 xfs_alloc_min_freelist(
2270 struct xfs_mount *mp,
2271 struct xfs_perag *pag)
2272 {
2273
2274 static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2275 const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
2276 unsigned int min_free;
2277
2278 ASSERT(mp->m_alloc_maxlevels > 0);
2279
2280
2281 min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2282 mp->m_alloc_maxlevels);
2283
2284 min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2285 mp->m_alloc_maxlevels);
2286
2287 if (xfs_has_rmapbt(mp))
2288 min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2289 mp->m_rmap_maxlevels);
2290
2291 return min_free;
2292 }
2293
2294
2295
2296
2297
2298
2299
/*
 * Check if the operation we are fixing up the freelist for should go ahead
 * or not.  If we are freeing blocks, we always allow it; otherwise the
 * allocation is only allowed if there is enough free and unreserved space
 * for both the request and the minimum freelist refill.  May trim
 * args->maxlen down to what is actually available.
 */
static bool
xfs_alloc_space_available(
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		min_free,
	int			flags)
{
	struct xfs_perag	*pag = args->pag;
	xfs_extlen_t		alloc_len, longest;
	xfs_extlen_t		reservation; /* blocks that are reserved */
	int			available;
	xfs_extlen_t		agflcount;

	/* freeing always proceeds; it only adds space */
	if (flags & XFS_ALLOC_FLAG_FREEING)
		return true;

	reservation = xfs_ag_resv_needed(pag, args->resv);

	/* do we have enough contiguous free space for the allocation? */
	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
	if (longest < alloc_len)
		return false;

	/*
	 * Do we have enough free space remaining for the allocation?  Don't
	 * account for extra agfl blocks because we are about to defer free
	 * them, making them unavailable until the current transaction commits.
	 */
	agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
	available = (int)(pag->pagf_freeblks + agflcount -
			  reservation - min_free - args->minleft);
	if (available < (int)max(args->total, alloc_len))
		return false;

	/*
	 * Clamp maxlen to the amount of free space available for the actual
	 * extent allocation (unless the caller only wanted the check).
	 */
	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
		args->maxlen = available;
		ASSERT(args->maxlen > 0);
		ASSERT(args->maxlen >= args->minlen);
	}

	return true;
}
2346
/*
 * Free a single block that came from the AGFL back to the free space btrees
 * (with the AGFL reservation type), then invalidate its buffer so nothing
 * in this transaction writes to the now-free block.
 */
int
xfs_free_agfl_block(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	struct xfs_buf		*agbp,
	struct xfs_owner_info	*oinfo)
{
	int			error;
	struct xfs_buf		*bp;

	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
				   XFS_AG_RESV_AGFL);
	if (error)
		return error;

	/* grab the block's buffer so we can invalidate it */
	error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
			XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
			tp->t_mountp->m_bsize, 0, &bp);
	if (error)
		return error;
	xfs_trans_binval(tp, bp);

	return 0;
}
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384 static bool
2385 xfs_agfl_needs_reset(
2386 struct xfs_mount *mp,
2387 struct xfs_agf *agf)
2388 {
2389 uint32_t f = be32_to_cpu(agf->agf_flfirst);
2390 uint32_t l = be32_to_cpu(agf->agf_fllast);
2391 uint32_t c = be32_to_cpu(agf->agf_flcount);
2392 int agfl_size = xfs_agfl_size(mp);
2393 int active;
2394
2395
2396 if (!xfs_has_crc(mp))
2397 return false;
2398
2399
2400
2401
2402
2403
2404 if (f >= agfl_size || l >= agfl_size)
2405 return true;
2406 if (c > agfl_size)
2407 return true;
2408
2409
2410
2411
2412
2413 if (c && l >= f)
2414 active = l - f + 1;
2415 else if (c)
2416 active = agfl_size - f + l + 1;
2417 else
2418 active = 0;
2419
2420 return active != c;
2421 }
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
/*
 * Reset the AGFL to an empty state.  Any blocks it currently tracks are
 * leaked (recoverable only via xfs_repair), which is why this logs a
 * warning.  Clears the in-core mirror state and the reset-needed flag.
 */
static void
xfs_agfl_reset(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agf		*agf = agbp->b_addr;

	ASSERT(pag->pagf_agflreset);
	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);

	xfs_warn(mp,
	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
	       "Please unmount and run xfs_repair.",
	         pag->pag_agno, pag->pagf_flcount);

	/* empty circular buffer: first wraps to 0, last at the end */
	agf->agf_flfirst = 0;
	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
	agf->agf_flcount = 0;
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
				    XFS_AGF_FLCOUNT);

	pag->pagf_flcount = 0;
	pag->pagf_agflreset = false;
}
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
/*
 * Defer an AGFL block free.  Queues a single-block extent-free intent so
 * the block is released in a follow-up transaction rather than immediately;
 * this keeps AGFL fixups from overrunning the current transaction's
 * reservation.
 */
STATIC void
xfs_defer_agfl_block(
	struct xfs_trans		*tp,
	xfs_agnumber_t			agno,
	xfs_fsblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_extent_free_item	*new;	/* new element */

	ASSERT(xfs_extfree_item_cache != NULL);
	ASSERT(oinfo != NULL);

	/* __GFP_NOFAIL: allocation must not fail inside a dirty transaction */
	new = kmem_cache_zalloc(xfs_extfree_item_cache,
			       GFP_KERNEL | __GFP_NOFAIL);
	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
	new->xefi_blockcount = 1;
	new->xefi_owner = oinfo->oi_owner;

	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);

	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
}
2494
2495
2496
2497
2498
/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_free_extent_later(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;	/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= XFS_MAX_BMBT_EXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_extfree_item_cache != NULL);

	/* __GFP_NOFAIL: allocation must not fail inside a dirty transaction */
	new = kmem_cache_zalloc(xfs_extfree_item_cache,
			       GFP_KERNEL | __GFP_NOFAIL);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (skip_discard)
		new->xefi_flags |= XFS_EFI_SKIP_DISCARD;
	if (oinfo) {
		ASSERT(oinfo->oi_offset == 0);

		/* translate the owner info into EFI flags + owner number */
		if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
			new->xefi_flags |= XFS_EFI_ATTR_FORK;
		if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
			new->xefi_flags |= XFS_EFI_BMBT_BLOCK;
		new->xefi_owner = oinfo->oi_owner;
	} else {
		new->xefi_owner = XFS_RMAP_OWN_NULL;
	}
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
2548
2549 #ifdef DEBUG
2550
2551
2552
2553
/*
 * Debug-only helper: check whether an extent of exactly args->minlen blocks
 * is available in the AG's by-size btree.  *stat is 1 if so, 0 otherwise.
 * Used by the alloc_minlen_only error-injection path.
 */
STATIC int
xfs_exact_minlen_extent_available(
	struct xfs_alloc_arg	*args,
	struct xfs_buf		*agbp,
	int			*stat)
{
	struct xfs_btree_cur	*cnt_cur;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
					args->pag, XFS_BTNUM_CNT);
	error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
	if (error)
		goto out;

	/* no record >= minlen at all is a corruption at this point */
	if (*stat == 0) {
		error = -EFSCORRUPTED;
		goto out;
	}

	error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat);
	if (error)
		goto out;

	/* the smallest record >= minlen must be exactly minlen */
	if (*stat == 1 && flen != args->minlen)
		*stat = 0;

out:
	xfs_btree_del_cursor(cnt_cur, error);

	return error;
}
2588 #endif
2589
2590
2591
2592
2593
/*
 * Decide whether to use this allocation group for this allocation.
 * If so, fix up the btree freelist's size: shrink it if it's too long,
 * or refill it from the free space btrees if it's too short.  Sets
 * args->agbp on success, NULL when the AG is unusable.
 */
int			/* error */
xfs_alloc_fix_freelist(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	int			flags)	/* XFS_ALLOC_FLAG_... */
{
	struct xfs_mount	*mp = args->mp;
	struct xfs_perag	*pag = args->pag;
	struct xfs_trans	*tp = args->tp;
	struct xfs_buf		*agbp = NULL;
	struct xfs_buf		*agflbp = NULL;
	struct xfs_alloc_arg	targs;	/* local allocation arguments */
	xfs_agblock_t		bno;	/* freelist block */
	xfs_extlen_t		need;	/* total blocks needed in freelist */
	int			error = 0;

	/* deferred ops (AGFL block frees) require permanent transactions */
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	if (!pag->pagf_init) {
		error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
		if (error) {
			/* couldn't lock the AGF so skip this AG */
			if (error == -EAGAIN)
				error = 0;
			goto out_no_agbp;
		}
	}

	/*
	 * If this is a metadata preferred pag and we are user data then try
	 * somewhere else if we are not being asked to try harder at this
	 * point.
	 */
	if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
		goto out_agbp_relse;
	}

	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags |
			XFS_ALLOC_FLAG_CHECK))
		goto out_agbp_relse;

	/*
	 * Get the AGF buffer (it may not have been read above if pagf was
	 * already initialized).
	 */
	if (!agbp) {
		error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
		if (error) {
			/* couldn't lock the AGF so skip this AG */
			if (error == -EAGAIN)
				error = 0;
			goto out_no_agbp;
		}
	}

	/* reset a padding-mismatched AGFL before final free space check */
	if (pag->pagf_agflreset)
		xfs_agfl_reset(tp, agbp, pag);

	/* If there isn't enough total space or single-extent, reject it. */
	need = xfs_alloc_min_freelist(mp, pag);
	if (!xfs_alloc_space_available(args, need, flags))
		goto out_agbp_relse;

#ifdef DEBUG
	if (args->alloc_minlen_only) {
		int stat;

		error = xfs_exact_minlen_extent_available(args, agbp, &stat);
		if (error || !stat)
			goto out_agbp_relse;
	}
#endif
	/*
	 * Make the freelist shorter if it's too long.  The freed blocks are
	 * deferred rather than freed directly so that a single transaction
	 * cannot overrun its reservation on repeated AGFL fixups; see the
	 * use of xfs_defer_agfl_block() below.
	 *
	 * The NORMAP flag controls whether the rmap btree is updated for
	 * these frees.
	 */
	memset(&targs, 0, sizeof(targs));
	/* struct copy below */
	if (flags & XFS_ALLOC_FLAG_NORMAP)
		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	else
		targs.oinfo = XFS_RMAP_OINFO_AG;
	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
		error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
		if (error)
			goto out_agbp_relse;

		/* defer agfl frees */
		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
	}

	targs.tp = tp;
	targs.mp = mp;
	targs.agbp = agbp;
	targs.agno = args->agno;
	targs.alignment = targs.minlen = targs.prod = 1;
	targs.type = XFS_ALLOCTYPE_THIS_AG;
	targs.pag = pag;
	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
	if (error)
		goto out_agbp_relse;

	/* Make the freelist longer if it's too short. */
	while (pag->pagf_flcount < need) {
		targs.agbno = 0;
		targs.maxlen = need - pag->pagf_flcount;
		targs.resv = XFS_AG_RESV_AGFL;

		/* Allocate as many blocks as possible at once. */
		error = xfs_alloc_ag_vextent(&targs);
		if (error)
			goto out_agflbp_relse;

		/*
		 * Stop if we run out.  Won't happen if callers are obeying
		 * the restrictions correctly.  Can happen for free calls
		 * on a completely full ag.
		 */
		if (targs.agbno == NULLAGBLOCK) {
			if (flags & XFS_ALLOC_FLAG_FREEING)
				break;
			goto out_agflbp_relse;
		}
		/*
		 * Put each allocated block on the list.
		 */
		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
			error = xfs_alloc_put_freelist(pag, tp, agbp,
							agflbp, bno, 0);
			if (error)
				goto out_agflbp_relse;
		}
	}
	xfs_trans_brelse(tp, agflbp);
	args->agbp = agbp;
	return 0;

out_agflbp_relse:
	xfs_trans_brelse(tp, agflbp);
out_agbp_relse:
	if (agbp)
		xfs_trans_brelse(tp, agbp);
out_no_agbp:
	args->agbp = NULL;
	return error;
}
2764
2765
2766
2767
2768
/*
 * Get a block from the freelist.
 * Returns with the buffer for the block gotten.  *bnop is NULLAGBLOCK if
 * the freelist is empty.  If btreeblk is set, the block is being consumed
 * by an AGF btree and the AGF btreeblks counter is bumped.
 */
int
xfs_alloc_get_freelist(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agblock_t		*bnop,
	int			btreeblk)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_buf		*agflbp;
	xfs_agblock_t		bno;
	__be32			*agfl_bno;
	int			error;
	uint32_t		logflags;
	struct xfs_mount	*mp = tp->t_mountp;

	/*
	 * Freelist is empty, give up.
	 */
	if (!agf->agf_flcount) {
		*bnop = NULLAGBLOCK;
		return 0;
	}
	/*
	 * Read the array of free blocks.
	 */
	error = xfs_alloc_read_agfl(pag, tp, &agflbp);
	if (error)
		return error;


	/*
	 * Get the block number and update the data structures.
	 */
	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
	be32_add_cpu(&agf->agf_flfirst, 1);
	xfs_trans_brelse(tp, agflbp);

	/* wrap the circular first index back to the start of the AGFL */
	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
		agf->agf_flfirst = 0;

	ASSERT(!pag->pagf_agflreset);
	be32_add_cpu(&agf->agf_flcount, -1);
	pag->pagf_flcount--;

	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, 1);
		pag->pagf_btreeblks++;
		logflags |= XFS_AGF_BTREEBLKS;
	}

	xfs_alloc_log_agf(tp, agbp, logflags);
	*bnop = bno;

	return 0;
}
2826
2827
2828
2829
/*
 * Log the given fields from the agf structure.  The offsets table maps each
 * XFS_AGF_* flag bit (in order) to the byte offset of the corresponding
 * on-disk field; xfs_btree_offsets() converts the flag mask into a logged
 * byte range.
 */
void
xfs_alloc_log_agf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint32_t		fields)
{
	int			first;	/* first byte offset */
	int			last;	/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		offsetof(xfs_agf_t, agf_uuid),
		offsetof(xfs_agf_t, agf_rmap_blocks),
		offsetof(xfs_agf_t, agf_refcount_blocks),
		offsetof(xfs_agf_t, agf_refcount_root),
		offsetof(xfs_agf_t, agf_refcount_level),
		/* needed so that we don't log the whole rest of the structure: */
		offsetof(xfs_agf_t, agf_spare64),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
2868
2869
2870
2871
/*
 * Put the block on the freelist for the allocation group.  If btreeblk is
 * set, the block was previously consumed by an AGF btree, so the AGF
 * btreeblks counter is decremented to match.
 */
int
xfs_alloc_put_freelist(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_buf		*agflbp,
	xfs_agblock_t		bno,
	int			btreeblk)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_agf		*agf = agbp->b_addr;
	__be32			*blockp;
	int			error;
	uint32_t		logflags;
	__be32			*agfl_bno;
	int			startoff;

	/* read the AGFL if the caller didn't supply its buffer */
	if (!agflbp) {
		error = xfs_alloc_read_agfl(pag, tp, &agflbp);
		if (error)
			return error;
	}

	be32_add_cpu(&agf->agf_fllast, 1);
	/* wrap the circular last index back to the start of the AGFL */
	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
		agf->agf_fllast = 0;

	ASSERT(!pag->pagf_agflreset);
	be32_add_cpu(&agf->agf_flcount, 1);
	pag->pagf_flcount++;

	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
	if (btreeblk) {
		be32_add_cpu(&agf->agf_btreeblks, -1);
		pag->pagf_btreeblks--;
		logflags |= XFS_AGF_BTREEBLKS;
	}

	xfs_alloc_log_agf(tp, agbp, logflags);

	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));

	/* store the block number in the AGFL slot and log that range */
	agfl_bno = xfs_buf_to_agfl_bno(agflbp);
	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
	*blockp = cpu_to_be32(bno);
	startoff = (char *)blockp - (char *)agflbp->b_addr;

	/*
	 * NOTE(review): this second xfs_alloc_log_agf() call with the same
	 * flags appears redundant with the one above — harmless (relogging
	 * the same AGF fields), but worth confirming against upstream intent.
	 */
	xfs_alloc_log_agf(tp, agbp, logflags);

	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(tp, agflbp, startoff,
			  startoff + sizeof(xfs_agblock_t) - 1);
	return 0;
}
2926
/*
 * Structural verifier for an AGF buffer.  Returns the address of the failing
 * check (for error reporting) or NULL if the AGF looks sane.
 */
static xfs_failaddr_t
xfs_agf_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_agf		*agf = bp->b_addr;

	/* v5 superblocks carry a UUID and LSN that must match the fs */
	if (xfs_has_crc(mp)) {
		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
			return __this_address;
	}

	if (!xfs_verify_magic(bp, agf->agf_magicnum))
		return __this_address;

	/* version, and AGFL indices/count within the AGFL bounds */
	if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
		return __this_address;

	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
		return __this_address;

	/* freeblks must bound longest and fit inside the AG */
	if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
	    be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
		return __this_address;

	/* free space btree levels must be in [1, maxlevels] */
	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) >
						mp->m_alloc_maxlevels ||
	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) >
						mp->m_alloc_maxlevels)
		return __this_address;

	if (xfs_has_rmapbt(mp) &&
	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) >
						mp->m_rmap_maxlevels))
		return __this_address;

	if (xfs_has_rmapbt(mp) &&
	    be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
		return __this_address;

	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	if (xfs_has_lazysbcount(mp) &&
	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
		return __this_address;

	if (xfs_has_reflink(mp) &&
	    be32_to_cpu(agf->agf_refcount_blocks) >
	    be32_to_cpu(agf->agf_length))
		return __this_address;

	if (xfs_has_reflink(mp) &&
	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
	     be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels))
		return __this_address;

	return NULL;

}
3002
/*
 * Read verifier: check the CRC (on v5 filesystems) and then the structure.
 * Errors are attached to the buffer via xfs_verifier_error().
 */
static void
xfs_agf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	if (xfs_has_crc(mp) &&
	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agf_verify(bp);
		/* XFS_TEST_ERROR allows injected read failures for testing */
		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}
3019
3020 static void
3021 xfs_agf_write_verify(
3022 struct xfs_buf *bp)
3023 {
3024 struct xfs_mount *mp = bp->b_mount;
3025 struct xfs_buf_log_item *bip = bp->b_log_item;
3026 struct xfs_agf *agf = bp->b_addr;
3027 xfs_failaddr_t fa;
3028
3029 fa = xfs_agf_verify(bp);
3030 if (fa) {
3031 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
3032 return;
3033 }
3034
3035 if (!xfs_has_crc(mp))
3036 return;
3037
3038 if (bip)
3039 agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
3040
3041 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
3042 }
3043
/*
 * Buffer ops for AGF buffers.  Both magic slots hold the same AGF magic
 * (the ops structure has one slot per on-disk format generation).
 */
const struct xfs_buf_ops xfs_agf_buf_ops = {
	.name = "xfs_agf",
	.magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
	.verify_read = xfs_agf_read_verify,
	.verify_write = xfs_agf_write_verify,
	.verify_struct = xfs_agf_verify,
};
3051
3052
3053
3054
3055 int
3056 xfs_read_agf(
3057 struct xfs_perag *pag,
3058 struct xfs_trans *tp,
3059 int flags,
3060 struct xfs_buf **agfbpp)
3061 {
3062 struct xfs_mount *mp = pag->pag_mount;
3063 int error;
3064
3065 trace_xfs_read_agf(pag->pag_mount, pag->pag_agno);
3066
3067 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3068 XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGF_DADDR(mp)),
3069 XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops);
3070 if (error)
3071 return error;
3072
3073 xfs_buf_set_ref(*agfbpp, XFS_AGF_REF);
3074 return 0;
3075 }
3076
3077
3078
3079
3080
3081
/*
 * Read in the AGF for allocation group @pag, and seed the in-core per-AG
 * counters from it on first use.  On success the AGF buffer is returned in
 * *agfbpp, or released immediately if the caller passed a NULL agfbpp.
 */
int
xfs_alloc_read_agf(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	int			flags,
	struct xfs_buf		**agfbpp)
{
	struct xfs_buf		*agfbp;
	struct xfs_agf		*agf;
	int			error;
	int			allocbt_blks;

	trace_xfs_alloc_read_agf(pag->pag_mount, pag->pag_agno);

	/* FREEING and TRYLOCK must not be combined by callers. */
	ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
			(XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
	error = xfs_read_agf(pag, tp,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			&agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	if (!pag->pagf_init) {
		/* First read of this AG: cache the on-disk AGF counters. */
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		pag->pagf_levels[XFS_BTNUM_RMAPi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
		pag->pagf_init = 1;
		pag->pagf_agflreset = xfs_agfl_needs_reset(pag->pag_mount, agf);

		/*
		 * Fold this AG's allocation btree blocks into the global
		 * m_allocbt_blks counter.  When the rmapbt is enabled,
		 * agf_btreeblks apparently also covers rmapbt blocks, so
		 * agf_rmap_blocks - 1 is subtracted first (the -1 presumably
		 * excludes the rmapbt root from the adjustment -- confirm
		 * against the on-disk format documentation).  Only positive
		 * remainders are added, which guards against underflow on
		 * unusual on-disk counter combinations.
		 */
		allocbt_blks = pag->pagf_btreeblks;
		if (xfs_has_rmapbt(pag->pag_mount))
			allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
		if (allocbt_blks > 0)
			atomic64_add(allocbt_blks,
					&pag->pag_mount->m_allocbt_blks);
	}
#ifdef DEBUG
	/* Sanity-check the cached counters against the freshly read AGF. */
	else if (!xfs_is_shutdown(pag->pag_mount)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
#endif
	if (agfbpp)
		*agfbpp = agfbp;
	else
		xfs_trans_brelse(tp, agfbp);
	return 0;
}
3153
3154
3155
3156
3157
3158
/*
 * Allocate an extent (variable-size).  Depending on the allocation type,
 * we either look in a single allocation group or loop over the allocation
 * groups to find the result.
 */
int
xfs_alloc_vextent(
	struct xfs_alloc_arg	*args)	/* allocation argument structure */
{
	xfs_agblock_t		agsize;	/* allocation group size */
	int			error;
	int			flags;	/* XFS_ALLOC_FLAG_... */
	struct xfs_mount	*mp;	/* mount structure pointer */
	xfs_agnumber_t		sagno;	/* starting allocation group number */
	xfs_alloctype_t		type;	/* input allocation type */
	int			bump_rotor = 0;
	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out.
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	/* Bad arguments are reported via trace and treated as "no space". */
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}

	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		error = xfs_alloc_fix_freelist(args, 0);
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
		    xfs_is_inode32(mp)) {
			/* inode32: pick the starting AG from the rotor. */
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		fallthrough;
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			error = xfs_alloc_fix_freelist(args, flags);
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;

			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno.  Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->tp->t_firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (flags == 0) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}

				flags = 0;
				if (type == XFS_ALLOCTYPE_START_BNO) {
					args->agbno = XFS_FSB_TO_AGBNO(mp,
						args->fsbno);
					args->type = XFS_ALLOCTYPE_NEAR_BNO;
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor) {
			/* Advance the inode32 rotor past the AG we used. */
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
#endif

	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
3348
3349
3350 int
3351 xfs_free_extent_fix_freelist(
3352 struct xfs_trans *tp,
3353 struct xfs_perag *pag,
3354 struct xfs_buf **agbp)
3355 {
3356 struct xfs_alloc_arg args;
3357 int error;
3358
3359 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3360 args.tp = tp;
3361 args.mp = tp->t_mountp;
3362 args.agno = pag->pag_agno;
3363 args.pag = pag;
3364
3365
3366
3367
3368
3369 if (args.agno >= args.mp->m_sb.sb_agcount)
3370 return -EFSCORRUPTED;
3371
3372 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3373 if (error)
3374 return error;
3375
3376 *agbp = args.agbp;
3377 return 0;
3378 }
3379
3380
3381
3382
3383
3384
3385 int
3386 __xfs_free_extent(
3387 struct xfs_trans *tp,
3388 xfs_fsblock_t bno,
3389 xfs_extlen_t len,
3390 const struct xfs_owner_info *oinfo,
3391 enum xfs_ag_resv_type type,
3392 bool skip_discard)
3393 {
3394 struct xfs_mount *mp = tp->t_mountp;
3395 struct xfs_buf *agbp;
3396 xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
3397 xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
3398 struct xfs_agf *agf;
3399 int error;
3400 unsigned int busy_flags = 0;
3401 struct xfs_perag *pag;
3402
3403 ASSERT(len != 0);
3404 ASSERT(type != XFS_AG_RESV_AGFL);
3405
3406 if (XFS_TEST_ERROR(false, mp,
3407 XFS_ERRTAG_FREE_EXTENT))
3408 return -EIO;
3409
3410 pag = xfs_perag_get(mp, agno);
3411 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
3412 if (error)
3413 goto err;
3414 agf = agbp->b_addr;
3415
3416 if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
3417 error = -EFSCORRUPTED;
3418 goto err_release;
3419 }
3420
3421
3422 if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
3423 error = -EFSCORRUPTED;
3424 goto err_release;
3425 }
3426
3427 error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3428 if (error)
3429 goto err_release;
3430
3431 if (skip_discard)
3432 busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3433 xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags);
3434 xfs_perag_put(pag);
3435 return 0;
3436
3437 err_release:
3438 xfs_trans_brelse(tp, agbp);
3439 err:
3440 xfs_perag_put(pag);
3441 return error;
3442 }
3443
/* Context threaded through the btree query to the caller's callback. */
struct xfs_alloc_query_range_info {
	xfs_alloc_query_range_fn	fn;	/* per-record callback */
	void				*priv;	/* caller's opaque data */
};
3448
3449
3450 STATIC int
3451 xfs_alloc_query_range_helper(
3452 struct xfs_btree_cur *cur,
3453 const union xfs_btree_rec *rec,
3454 void *priv)
3455 {
3456 struct xfs_alloc_query_range_info *query = priv;
3457 struct xfs_alloc_rec_incore irec;
3458
3459 irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3460 irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3461 return query->fn(cur, &irec, query->priv);
3462 }
3463
3464
3465 int
3466 xfs_alloc_query_range(
3467 struct xfs_btree_cur *cur,
3468 const struct xfs_alloc_rec_incore *low_rec,
3469 const struct xfs_alloc_rec_incore *high_rec,
3470 xfs_alloc_query_range_fn fn,
3471 void *priv)
3472 {
3473 union xfs_btree_irec low_brec;
3474 union xfs_btree_irec high_brec;
3475 struct xfs_alloc_query_range_info query;
3476
3477 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3478 low_brec.a = *low_rec;
3479 high_brec.a = *high_rec;
3480 query.priv = priv;
3481 query.fn = fn;
3482 return xfs_btree_query_range(cur, &low_brec, &high_brec,
3483 xfs_alloc_query_range_helper, &query);
3484 }
3485
3486
3487 int
3488 xfs_alloc_query_all(
3489 struct xfs_btree_cur *cur,
3490 xfs_alloc_query_range_fn fn,
3491 void *priv)
3492 {
3493 struct xfs_alloc_query_range_info query;
3494
3495 ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3496 query.priv = priv;
3497 query.fn = fn;
3498 return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3499 }
3500
3501
3502 int
3503 xfs_alloc_has_record(
3504 struct xfs_btree_cur *cur,
3505 xfs_agblock_t bno,
3506 xfs_extlen_t len,
3507 bool *exists)
3508 {
3509 union xfs_btree_irec low;
3510 union xfs_btree_irec high;
3511
3512 memset(&low, 0, sizeof(low));
3513 low.a.ar_startblock = bno;
3514 memset(&high, 0xFF, sizeof(high));
3515 high.a.ar_startblock = bno + len - 1;
3516
3517 return xfs_btree_has_record(cur, &low, &high, exists);
3518 }
3519
3520
3521
3522
3523
3524 int
3525 xfs_agfl_walk(
3526 struct xfs_mount *mp,
3527 struct xfs_agf *agf,
3528 struct xfs_buf *agflbp,
3529 xfs_agfl_walk_fn walk_fn,
3530 void *priv)
3531 {
3532 __be32 *agfl_bno;
3533 unsigned int i;
3534 int error;
3535
3536 agfl_bno = xfs_buf_to_agfl_bno(agflbp);
3537 i = be32_to_cpu(agf->agf_flfirst);
3538
3539
3540 if (agf->agf_flcount == cpu_to_be32(0))
3541 return 0;
3542
3543
3544 for (;;) {
3545 error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3546 if (error)
3547 return error;
3548 if (i == be32_to_cpu(agf->agf_fllast))
3549 break;
3550 if (++i == xfs_agfl_size(mp))
3551 i = 0;
3552 }
3553
3554 return 0;
3555 }
3556
3557 int __init
3558 xfs_extfree_intent_init_cache(void)
3559 {
3560 xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent",
3561 sizeof(struct xfs_extent_free_item),
3562 0, 0, NULL);
3563
3564 return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM;
3565 }
3566
/* Tear down the extent-free intent slab cache. */
void
xfs_extfree_intent_destroy_cache(void)
{
	kmem_cache_destroy(xfs_extfree_item_cache);
	/* Clear the pointer so a stale cache is never reused after teardown. */
	xfs_extfree_item_cache = NULL;
}