// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "xfs_ag.h"

/*
 * Set us up to scrub reference count btrees.
 */
int
xchk_setup_ag_refcountbt(
	struct xfs_scrub	*sc)
{
	return xchk_setup_ag_btree(sc, false);
}

/* Reference count btree scrubber. */

/*
 * Confirming Reference Counts via Reverse Mappings
 *
 * We want to count the reverse mappings overlapping a refcount record
 * (bno, len, refcount), allowing for the possibility that some of the
 * overlap may come from smaller adjoining reverse mappings, while some
 * comes from single extents which overlap the range entirely.  The
 * outer loop is as follows:
 *
 * 1. For all reverse mappings overlapping the refcount extent,
 *    a. If a given rmap completely overlaps, mark it as seen.
 *    b. Otherwise, record the fragment (in agbno order) for later
 *       processing.
 *
 * Once we've seen all the rmaps, we know that for all blocks in the
 * refcount record we want to find $refcount owners and we've already
 * visited $seen extents that overlap all the blocks.  Therefore, we
 * need to find ($refcount - $seen) owners for every block in the
 * extent; call that quantity $target_nr.  Proceed as follows:
 *
 * 2. Pull the first $target_nr fragments from the list; all of them
 *    should start at or before the start of the extent.  Call this
 *    subset of fragments the working set.
 * 3. Until there are no more unprocessed fragments,
 *    a. Find the shortest fragments in the set and remove them.
 *    b. Note the block number of the end of these fragments.
 *    c. Pull the same number of fragments from the list.  All of these
 *       fragments should start at the block number recorded in the
 *       previous step.
 *    d. Put those fragments in the set.
 * 4. Check that there are $target_nr fragments remaining in the list,
 *    and that they all end at or beyond the end of the refcount extent.
 *
 * If the refcount is correct, all the check conditions in the algorithm
 * should always hold true.  If not, the refcount is incorrect.
 */
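
/*
 * For illustration (values invented for this comment), consider a
 * refcount record (bno = 10, len = 10, refcount = 2) and four rmap
 * fragments A = [8, 14), B = [10, 16), C = [14, 20), D = [16, 22),
 * none of which covers the whole extent, so seen = 0 and target_nr = 2.
 *
 * Step 2 pulls A and B (both start at or before bno 10) into the
 * working set and notes the smallest end, rbno = 14.  The first pass
 * of step 3 discards A (it ends at rbno), pulls C (it starts at rbno),
 * and advances rbno to 16; the second pass discards B and pulls D,
 * advancing rbno to 20.  The fragment list is now empty and rbno has
 * reached bno + len, so every block in [10, 20) has exactly two owners
 * and seen becomes 2.
 */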

struct xchk_refcnt_frag {
	struct list_head	list;
	struct xfs_rmap_irec	rm;
};

struct xchk_refcnt_check {
	struct xfs_scrub	*sc;
	struct list_head	fragments;

	/* refcount extent we're checking */
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;

	/* number of owners seen */
	xfs_nlink_t		seen;
};

/*
 * Decide if the given rmap is large enough that we can redeem it
 * all at once; if not, track the fragment for later processing.
 */
STATIC int
xchk_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_refcnt_check	*refchk = priv;
	struct xchk_refcnt_frag		*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xchk_should_terminate(refchk->sc, &error))
		return error;

	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}

/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 */
STATIC void
xchk_refcountbt_process_rmap_fragments(
	struct xchk_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we haven't
	 * found yet.  Pull that many off the fragment list and figure out
	 * where the smallest rmap ends (and therefore the next rmap should
	 * start).  All the rmaps we pull off should start at or before the
	 * beginning of the refcount record's range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	nr = 0;
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
			break;
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		nr++;
	}

	/*
	 * We should have found exactly target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the
		 * fragment list ran out of items.  Therefore, we cannot
		 * maintain the required refcount.  Something is wrong, so
		 * we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;
done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Use the rmap entries covering this extent to verify the refcount. */
STATIC void
xchk_refcountbt_xref_rmap(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xchk_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xchk_refcnt_frag		*frag;
	struct xchk_refcnt_frag		*n;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xchk_refcountbt_rmap_check, &refchk);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xchk_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_refcountbt_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	xfs_nlink_t		refcount)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_xref_is_not_inode_chunk(sc, agbno, len);
	xchk_refcountbt_xref_rmap(sc, agbno, len, refcount);
}

/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
	struct xchk_btree	*bs,
	const union xfs_btree_rec *rec)
{
	xfs_agblock_t		*cow_blocks = bs->private;
	struct xfs_perag	*pag = bs->cur->bc_ag.pag;
	xfs_agblock_t		bno;
	xfs_extlen_t		len;
	xfs_nlink_t		refcount;
	bool			has_cowflag;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
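	/*
	 * (Illustration, values invented here: XFS_REFC_COW_START is the
	 * high bit of the 32-bit startblock field, so a CoW staging extent
	 * at agbno 100 would be stored as rc_startblock == (1U << 31) | 100
	 * with rc_refcount == 1.)
	 */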
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	if (bno + len <= bno ||
	    !xfs_verify_agbno(pag, bno) ||
	    !xfs_verify_agbno(pag, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	xchk_refcountbt_xref(bs->sc, bno, len, refcount);

	return 0;
}

/* Make sure we have as many refc blocks as the rmap says. */
STATIC void
xchk_refcount_xref_rmap(
	struct xfs_scrub	*sc,
	xfs_filblks_t		cow_blocks)
{
	xfs_extlen_t		refcbt_blocks = 0;
	xfs_filblks_t		blocks;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_REFC, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
			&XFS_RMAP_OINFO_COW, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* Scrub the refcount btree for some AG. */
int
xchk_refcountbt(
	struct xfs_scrub	*sc)
{
	xfs_agblock_t		cow_blocks = 0;
	int			error;

	error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
			&XFS_RMAP_OINFO_REFC, &cow_blocks);
	if (error)
		return error;

	xchk_refcount_xref_rmap(sc, cow_blocks);

	return 0;
}
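
/*
 * Note: this scrubber is not called directly; scrub/scrub.c dispatches
 * to it through its table of scrub types.  The table entry pairing the
 * setup and scrub hooks is assumed to look roughly like:
 *
 *	[XFS_SCRUB_TYPE_REFCNTBT] = {
 *		.type	= ST_PERAG,
 *		.setup	= xchk_setup_ag_refcountbt,
 *		.scrub	= xchk_refcountbt,
 *	},
 */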

/* xref check that a cow staging extent is marked in the refcountbt. */
void
xchk_xref_is_cow_staging(
	struct xfs_scrub		*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in. */
	if (rc.rc_blockcount < len)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/*
 * xref check that the extent is not shared.  Only file data blocks
 * can have multiple owners.
 */
void
xchk_xref_is_not_shared(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			shared;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (shared)
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
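
/*
 * For reference, a minimal userspace sketch (not part of this file) that
 * asks the kernel to run this scrubber on AG 0 of an open file's
 * filesystem; the types and flags come from xfs_fs.h:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_REFCNTBT,
 *		.sm_agno = 0,
 *	};
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0 &&
 *	    (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
 *		warnx("refcount btree is corrupt");
 *
 * XFS_SCRUB_OFLAG_CORRUPT being set on return corresponds to one of the
 * xchk_*_set_corrupt() calls above having fired.
 */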