0001
0002
0003
0004
0005
0006 #include "xfs.h"
0007 #include "xfs_fs.h"
0008 #include "xfs_shared.h"
0009 #include "xfs_format.h"
0010 #include "xfs_log_format.h"
0011 #include "xfs_trans_resv.h"
0012 #include "xfs_bit.h"
0013 #include "xfs_sb.h"
0014 #include "xfs_mount.h"
0015 #include "xfs_inode.h"
0016 #include "xfs_dir2.h"
0017 #include "xfs_ialloc.h"
0018 #include "xfs_alloc.h"
0019 #include "xfs_rtalloc.h"
0020 #include "xfs_bmap.h"
0021 #include "xfs_trans.h"
0022 #include "xfs_trans_priv.h"
0023 #include "xfs_log.h"
0024 #include "xfs_log_priv.h"
0025 #include "xfs_error.h"
0026 #include "xfs_quota.h"
0027 #include "xfs_fsops.h"
0028 #include "xfs_icache.h"
0029 #include "xfs_sysfs.h"
0030 #include "xfs_rmap_btree.h"
0031 #include "xfs_refcount_btree.h"
0032 #include "xfs_reflink.h"
0033 #include "xfs_extent_busy.h"
0034 #include "xfs_health.h"
0035 #include "xfs_trace.h"
0036 #include "xfs_ag.h"
0037
0038 static DEFINE_MUTEX(xfs_uuid_table_mutex);
0039 static int xfs_uuid_table_size;
0040 static uuid_t *xfs_uuid_table;
0041
0042 void
0043 xfs_uuid_table_free(void)
0044 {
0045 if (xfs_uuid_table_size == 0)
0046 return;
0047 kmem_free(xfs_uuid_table);
0048 xfs_uuid_table = NULL;
0049 xfs_uuid_table_size = 0;
0050 }
0051
0052
0053
0054
0055
/*
 * Record this filesystem's UUID in the global table so the same filesystem
 * (by UUID) cannot be mounted twice at once.  Returns 0 on success or
 * -EINVAL for a null or duplicate UUID.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish the UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	/* "nouuid" mounts skip the duplicate check entirely */
	if (xfs_has_nouuid(mp))
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			/* remember a freed slot so we can reuse it below */
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		/* no free slot: grow the table by one entry (cannot fail) */
		xfs_uuid_table = krealloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			GFP_KERNEL | __GFP_NOFAIL);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}
0100
0101 STATIC void
0102 xfs_uuid_unmount(
0103 struct xfs_mount *mp)
0104 {
0105 uuid_t *uuid = &mp->m_sb.sb_uuid;
0106 int i;
0107
0108 if (xfs_has_nouuid(mp))
0109 return;
0110
0111 mutex_lock(&xfs_uuid_table_mutex);
0112 for (i = 0; i < xfs_uuid_table_size; i++) {
0113 if (uuid_is_null(&xfs_uuid_table[i]))
0114 continue;
0115 if (!uuid_equal(uuid, &xfs_uuid_table[i]))
0116 continue;
0117 memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
0118 break;
0119 }
0120 ASSERT(i < xfs_uuid_table_size);
0121 mutex_unlock(&xfs_uuid_table_mutex);
0122 }
0123
0124
0125
0126
0127
0128 int
0129 xfs_sb_validate_fsb_count(
0130 xfs_sb_t *sbp,
0131 uint64_t nblocks)
0132 {
0133 ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
0134 ASSERT(sbp->sb_blocklog >= BBSHIFT);
0135
0136
0137 if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
0138 return -EFBIG;
0139 return 0;
0140 }
0141
0142
0143
0144
0145
0146
/*
 * Read in and validate the superblock.  This is done in two passes: the
 * first read uses the device's physical sector size and no verifier (we
 * don't yet know the filesystem's sector size or version); once the
 * superblock tells us its real sector size we re-read with the proper
 * size and buffer ops attached.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, use the device sector size: it is the
	 * smallest size the superblock could occupy.  buf_ops stays NULL
	 * because we cannot pick a verifier before knowing the sb version.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Second pass re-enters here with the superblock's own sector size
	 * and the proper verifier installed.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means a corrupted superblock to callers */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the in-core superblock from the on-disk copy.
	 */
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * On the first pass there was no verifier, so check the magic number
	 * by hand before trusting anything else in the buffer.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * The device cannot have sectors larger than the filesystem claims.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * First pass: release the buffer and re-read using the
		 * filesystem's sector size with full verification.  The
		 * quiet ops suppress warnings during probing mounts.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	mp->m_features |= xfs_sb_version_to_features(sbp);
	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	/* keep the superblock buffer for the life of the mount, unlocked */
	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}
0243
0244
0245
0246
0247
0248
0249
0250 static inline int
0251 xfs_check_new_dalign(
0252 struct xfs_mount *mp,
0253 int new_dalign,
0254 bool *update_sb)
0255 {
0256 struct xfs_sb *sbp = &mp->m_sb;
0257 xfs_ino_t calc_ino;
0258
0259 calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
0260 trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);
0261
0262 if (sbp->sb_rootino == calc_ino) {
0263 *update_sb = true;
0264 return 0;
0265 }
0266
0267 xfs_warn(mp,
0268 "Cannot change stripe alignment; would require moving root inode.");
0269
0270
0271
0272
0273
0274
0275 xfs_warn(mp, "Skipping superblock stripe alignment update.");
0276 *update_sb = false;
0277 return 0;
0278 }
0279
0280
0281
0282
0283
0284
0285
0286 STATIC int
0287 xfs_validate_new_dalign(
0288 struct xfs_mount *mp)
0289 {
0290 if (mp->m_dalign == 0)
0291 return 0;
0292
0293
0294
0295
0296
0297 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
0298 (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
0299 xfs_warn(mp,
0300 "alignment check failed: sunit/swidth vs. blocksize(%d)",
0301 mp->m_sb.sb_blocksize);
0302 return -EINVAL;
0303 } else {
0304
0305
0306
0307 mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
0308 if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
0309 xfs_warn(mp,
0310 "alignment check failed: sunit/swidth vs. agsize(%d)",
0311 mp->m_sb.sb_agblocks);
0312 return -EINVAL;
0313 } else if (mp->m_dalign) {
0314 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
0315 } else {
0316 xfs_warn(mp,
0317 "alignment check failed: sunit(%d) less than bsize(%d)",
0318 mp->m_dalign, mp->m_sb.sb_blocksize);
0319 return -EINVAL;
0320 }
0321 }
0322
0323 if (!xfs_has_dalign(mp)) {
0324 xfs_warn(mp,
0325 "cannot change alignment: superblock does not support data alignment");
0326 return -EINVAL;
0327 }
0328
0329 return 0;
0330 }
0331
0332
0333 STATIC int
0334 xfs_update_alignment(
0335 struct xfs_mount *mp)
0336 {
0337 struct xfs_sb *sbp = &mp->m_sb;
0338
0339 if (mp->m_dalign) {
0340 bool update_sb;
0341 int error;
0342
0343 if (sbp->sb_unit == mp->m_dalign &&
0344 sbp->sb_width == mp->m_swidth)
0345 return 0;
0346
0347 error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
0348 if (error || !update_sb)
0349 return error;
0350
0351 sbp->sb_unit = mp->m_dalign;
0352 sbp->sb_width = mp->m_swidth;
0353 mp->m_update_sb = true;
0354 } else if (!xfs_has_noalign(mp) && xfs_has_dalign(mp)) {
0355 mp->m_dalign = sbp->sb_unit;
0356 mp->m_swidth = sbp->sb_width;
0357 }
0358
0359 return 0;
0360 }
0361
0362
0363
0364
0365 void
0366 xfs_set_low_space_thresholds(
0367 struct xfs_mount *mp)
0368 {
0369 uint64_t dblocks = mp->m_sb.sb_dblocks;
0370 uint64_t rtexts = mp->m_sb.sb_rextents;
0371 int i;
0372
0373 do_div(dblocks, 100);
0374 do_div(rtexts, 100);
0375
0376 for (i = 0; i < XFS_LOWSP_MAX; i++) {
0377 mp->m_low_space[i] = dblocks * (i + 1);
0378 mp->m_low_rtexts[i] = rtexts * (i + 1);
0379 }
0380 }
0381
0382
0383
0384
/*
 * Verify that the data and (external) log devices are actually as large as
 * the superblock claims, by checking the block counts for overflow and
 * reading the last addressable sector of each device.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	/* round-trip the data block count; mismatch means overflow */
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	/* read the last sector (sector-sized unit) of the data device */
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	/* internal log lives inside the data device, already covered */
	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	/* read the last filesystem block of the external log device */
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}
0425
0426
0427
0428
0429 int
0430 xfs_mount_reset_sbqflags(
0431 struct xfs_mount *mp)
0432 {
0433 mp->m_qflags = 0;
0434
0435
0436 if (mp->m_sb.sb_qflags == 0)
0437 return 0;
0438 spin_lock(&mp->m_sb_lock);
0439 mp->m_sb.sb_qflags = 0;
0440 spin_unlock(&mp->m_sb_lock);
0441
0442 if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
0443 return 0;
0444
0445 return xfs_sync_sb(mp, false);
0446 }
0447
0448 uint64_t
0449 xfs_default_resblks(xfs_mount_t *mp)
0450 {
0451 uint64_t resblks;
0452
0453
0454
0455
0456
0457
0458
0459
0460 resblks = mp->m_sb.sb_dblocks;
0461 do_div(resblks, 20);
0462 resblks = min_t(uint64_t, resblks, 8192);
0463 return resblks;
0464 }
0465
0466
/*
 * Sanity-check the superblock summary counters after log recovery and
 * recompute them from AG headers if they cannot be trusted.
 */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount *mp)
{
	int		error = 0;

	/*
	 * The mount-in-progress flag should have been cleared by a fully
	 * completed mkfs; seeing it here means mkfs was interrupted.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * A clean unmount should have written back exact counters, so if
	 * the filesystem is clean but fdblocks/icount/ifree are obviously
	 * out of range, mark the counters sick to force recomputation.
	 */
	if (xfs_is_clean(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * Recompute the summary counters from the AG headers when lazy
	 * sb counters are enabled and the fs was not cleanly unmounted
	 * (the counters are only guaranteed accurate at clean unmount),
	 * or when the quick checks above flagged them as sick.
	 */
	if ((xfs_has_lazysbcount(mp) && !xfs_is_clean(mp)) ||
	    xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS)) {
		error = xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
		if (error)
			return error;
	}

	/*
	 * Similarly, rebuild the free realtime extent count from the
	 * realtime bitmap after an unclean shutdown.
	 */
	if (xfs_has_realtime(mp) && !xfs_is_clean(mp)) {
		error = xfs_rtalloc_reinit_frextents(mp);
		if (error)
			return error;
	}

	return 0;
}
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
/*
 * Flush and reclaim all remaining inodes as part of unmount (or of
 * unwinding a failed mount).  The ordering here matters: force the log
 * and drain busy extents first, mark the fs as unmounting so background
 * work stops requeueing itself, push the AIL to clean everything, then
 * stop inodegc and reclaim what is left.
 */
static void
xfs_unmount_flush_inodes(
	struct xfs_mount	*mp)
{
	xfs_log_force(mp, XFS_LOG_SYNC);
	xfs_extent_busy_wait_all(mp);
	flush_workqueue(xfs_discard_wq);

	/* stop background work from rescheduling itself */
	set_bit(XFS_OPSTATE_UNMOUNTING, &mp->m_opstate);

	xfs_ail_push_all_sync(mp->m_ail);
	xfs_inodegc_stop(mp);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	xfs_reclaim_inodes(mp);
	xfs_health_unmount(mp);
}
0581
/*
 * Compute the per-mount inode geometry: the attr fork offset must be known
 * before xfs_ialloc_setup_geometry() derives the rest of the layout.
 */
static void
xfs_mount_setup_inode_geom(
	struct xfs_mount	*mp)
{
	struct xfs_ino_geometry *igeo = M_IGEO(mp);

	igeo->attr_fork_offset = xfs_bmap_compute_attr_offset(mp);
	/* the attr fork must fit inside the inode literal area */
	ASSERT(igeo->attr_fork_offset < XFS_LITINO(mp));

	xfs_ialloc_setup_geometry(mp);
}
0593
0594
0595 static inline void
0596 xfs_agbtree_compute_maxlevels(
0597 struct xfs_mount *mp)
0598 {
0599 unsigned int levels;
0600
0601 levels = max(mp->m_alloc_maxlevels, M_IGEO(mp)->inobt_maxlevels);
0602 levels = max(levels, mp->m_rmap_maxlevels);
0603 mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels);
0604 }
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616 int
0617 xfs_mountfs(
0618 struct xfs_mount *mp)
0619 {
0620 struct xfs_sb *sbp = &(mp->m_sb);
0621 struct xfs_inode *rip;
0622 struct xfs_ino_geometry *igeo = M_IGEO(mp);
0623 uint64_t resblks;
0624 uint quotamount = 0;
0625 uint quotaflags = 0;
0626 int error = 0;
0627
0628 xfs_sb_mount_common(mp, sbp);
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646 if (xfs_sb_has_mismatched_features2(sbp)) {
0647 xfs_warn(mp, "correcting sb_features alignment problem");
0648 sbp->sb_features2 |= sbp->sb_bad_features2;
0649 mp->m_update_sb = true;
0650 }
0651
0652
0653
0654 if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
0655 mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
0656 mp->m_features |= XFS_FEAT_NLINK;
0657 mp->m_update_sb = true;
0658 }
0659
0660
0661
0662
0663
0664
0665
0666 error = xfs_validate_new_dalign(mp);
0667 if (error)
0668 goto out;
0669
0670 xfs_alloc_compute_maxlevels(mp);
0671 xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
0672 xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
0673 xfs_mount_setup_inode_geom(mp);
0674 xfs_rmapbt_compute_maxlevels(mp);
0675 xfs_refcountbt_compute_maxlevels(mp);
0676
0677 xfs_agbtree_compute_maxlevels(mp);
0678
0679
0680
0681
0682
0683
0684
0685
0686 error = xfs_update_alignment(mp);
0687 if (error)
0688 goto out;
0689
0690
0691 mp->m_fail_unmount = true;
0692
0693 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
0694 NULL, mp->m_super->s_id);
0695 if (error)
0696 goto out;
0697
0698 error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
0699 &mp->m_kobj, "stats");
0700 if (error)
0701 goto out_remove_sysfs;
0702
0703 error = xfs_error_sysfs_init(mp);
0704 if (error)
0705 goto out_del_stats;
0706
0707 error = xfs_errortag_init(mp);
0708 if (error)
0709 goto out_remove_error_sysfs;
0710
0711 error = xfs_uuid_mount(mp);
0712 if (error)
0713 goto out_remove_errortag;
0714
0715
0716
0717
0718
0719 mp->m_allocsize_log =
0720 max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
0721 mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
0722
0723
0724 xfs_set_low_space_thresholds(mp);
0725
0726
0727
0728
0729
0730
0731 if (xfs_has_sparseinodes(mp) &&
0732 mp->m_sb.sb_spino_align !=
0733 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
0734 xfs_warn(mp,
0735 "Sparse inode block alignment (%u) must match cluster size (%llu).",
0736 mp->m_sb.sb_spino_align,
0737 XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
0738 error = -EINVAL;
0739 goto out_remove_uuid;
0740 }
0741
0742
0743
0744
0745 error = xfs_check_sizes(mp);
0746 if (error)
0747 goto out_remove_uuid;
0748
0749
0750
0751
0752 error = xfs_rtmount_init(mp);
0753 if (error) {
0754 xfs_warn(mp, "RT mount failed");
0755 goto out_remove_uuid;
0756 }
0757
0758
0759
0760
0761
0762 mp->m_fixedfsid[0] =
0763 (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
0764 get_unaligned_be16(&sbp->sb_uuid.b[4]);
0765 mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
0766
0767 error = xfs_da_mount(mp);
0768 if (error) {
0769 xfs_warn(mp, "Failed dir/attr init: %d", error);
0770 goto out_remove_uuid;
0771 }
0772
0773
0774
0775
0776 xfs_trans_init(mp);
0777
0778
0779
0780
0781 error = xfs_initialize_perag(mp, sbp->sb_agcount, mp->m_sb.sb_dblocks,
0782 &mp->m_maxagi);
0783 if (error) {
0784 xfs_warn(mp, "Failed per-ag init: %d", error);
0785 goto out_free_dir;
0786 }
0787
0788 if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
0789 xfs_warn(mp, "no log defined");
0790 error = -EFSCORRUPTED;
0791 goto out_free_perag;
0792 }
0793
0794 error = xfs_inodegc_register_shrinker(mp);
0795 if (error)
0796 goto out_fail_wait;
0797
0798
0799
0800
0801
0802
0803 error = xfs_log_mount(mp, mp->m_logdev_targp,
0804 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
0805 XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
0806 if (error) {
0807 xfs_warn(mp, "log mount failed");
0808 goto out_inodegc_shrinker;
0809 }
0810
0811
0812 xfs_inodegc_start(mp);
0813 xfs_blockgc_start(mp);
0814
0815
0816
0817
0818
0819
0820
0821 if (xfs_has_noattr2(mp)) {
0822 mp->m_features &= ~XFS_FEAT_ATTR2;
0823 } else if (!xfs_has_attr2(mp) &&
0824 (mp->m_sb.sb_features2 & XFS_SB_VERSION2_ATTR2BIT)) {
0825 mp->m_features |= XFS_FEAT_ATTR2;
0826 }
0827
0828
0829
0830
0831
0832 error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
0833 XFS_ILOCK_EXCL, &rip);
0834 if (error) {
0835 xfs_warn(mp,
0836 "Failed to read root inode 0x%llx, error %d",
0837 sbp->sb_rootino, -error);
0838 goto out_log_dealloc;
0839 }
0840
0841 ASSERT(rip != NULL);
0842
0843 if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
0844 xfs_warn(mp, "corrupted root inode %llu: not a directory",
0845 (unsigned long long)rip->i_ino);
0846 xfs_iunlock(rip, XFS_ILOCK_EXCL);
0847 error = -EFSCORRUPTED;
0848 goto out_rele_rip;
0849 }
0850 mp->m_rootip = rip;
0851
0852 xfs_iunlock(rip, XFS_ILOCK_EXCL);
0853
0854
0855
0856
0857 error = xfs_rtmount_inodes(mp);
0858 if (error) {
0859
0860
0861
0862 xfs_warn(mp, "failed to read RT inodes");
0863 goto out_rele_rip;
0864 }
0865
0866
0867 error = xfs_check_summary_counts(mp);
0868 if (error)
0869 goto out_rtunmount;
0870
0871
0872
0873
0874
0875
0876 if (mp->m_update_sb && !xfs_is_readonly(mp)) {
0877 error = xfs_sync_sb(mp, false);
0878 if (error) {
0879 xfs_warn(mp, "failed to write sb changes");
0880 goto out_rtunmount;
0881 }
0882 }
0883
0884
0885
0886
0887 if (XFS_IS_QUOTA_ON(mp)) {
0888 error = xfs_qm_newmount(mp, "amount, "aflags);
0889 if (error)
0890 goto out_rtunmount;
0891 } else {
0892
0893
0894
0895
0896
0897 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
0898 xfs_notice(mp, "resetting quota flags");
0899 error = xfs_mount_reset_sbqflags(mp);
0900 if (error)
0901 goto out_rtunmount;
0902 }
0903 }
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913 error = xfs_fs_reserve_ag_blocks(mp);
0914 if (error && error == -ENOSPC)
0915 xfs_warn(mp,
0916 "ENOSPC reserving per-AG metadata pool, log recovery may fail.");
0917 error = xfs_log_mount_finish(mp);
0918 xfs_fs_unreserve_ag_blocks(mp);
0919 if (error) {
0920 xfs_warn(mp, "log mount finish failed");
0921 goto out_rtunmount;
0922 }
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933 if (xfs_is_readonly(mp) && !xfs_has_norecovery(mp))
0934 xfs_log_clean(mp);
0935
0936
0937
0938
0939 if (quotamount) {
0940 ASSERT(mp->m_qflags == 0);
0941 mp->m_qflags = quotaflags;
0942
0943 xfs_qm_mount_quotas(mp);
0944 }
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957 if (!xfs_is_readonly(mp)) {
0958 resblks = xfs_default_resblks(mp);
0959 error = xfs_reserve_blocks(mp, &resblks, NULL);
0960 if (error)
0961 xfs_warn(mp,
0962 "Unable to allocate reserve blocks. Continuing without reserve pool.");
0963
0964
0965 error = xfs_fs_reserve_ag_blocks(mp);
0966 if (error && error != -ENOSPC)
0967 goto out_agresv;
0968 }
0969
0970 return 0;
0971
0972 out_agresv:
0973 xfs_fs_unreserve_ag_blocks(mp);
0974 xfs_qm_unmount_quotas(mp);
0975 out_rtunmount:
0976 xfs_rtunmount_inodes(mp);
0977 out_rele_rip:
0978 xfs_irele(rip);
0979
0980 xfs_qm_unmount(mp);
0981
0982
0983
0984
0985
0986
0987
0988 xfs_inodegc_flush(mp);
0989
0990
0991
0992
0993
0994
0995
0996
0997
0998
0999
1000
1001 xfs_unmount_flush_inodes(mp);
1002 out_log_dealloc:
1003 xfs_log_mount_cancel(mp);
1004 out_inodegc_shrinker:
1005 unregister_shrinker(&mp->m_inodegc_shrinker);
1006 out_fail_wait:
1007 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
1008 xfs_buftarg_drain(mp->m_logdev_targp);
1009 xfs_buftarg_drain(mp->m_ddev_targp);
1010 out_free_perag:
1011 xfs_free_perag(mp);
1012 out_free_dir:
1013 xfs_da_unmount(mp);
1014 out_remove_uuid:
1015 xfs_uuid_unmount(mp);
1016 out_remove_errortag:
1017 xfs_errortag_del(mp);
1018 out_remove_error_sysfs:
1019 xfs_error_sysfs_del(mp);
1020 out_del_stats:
1021 xfs_sysfs_del(&mp->m_stats.xs_kobj);
1022 out_remove_sysfs:
1023 xfs_sysfs_del(&mp->m_kobj);
1024 out:
1025 return error;
1026 }
1027
1028
1029
1030
1031
/*
 * Tear down a mounted filesystem: flush and reclaim all inodes, release
 * quota and realtime state, free the reserve pool, shut down the log and
 * free all per-mount structures.  Mirrors the unwind order of the error
 * path in xfs_mountfs().
 */
void
xfs_unmountfs(
	struct xfs_mount	*mp)
{
	uint64_t		resblks;
	int			error;

	/*
	 * Perform all on-disk metadata updates required to inactivate
	 * inodes.  Since this can involve finobt updates, do it now before
	 * we lose the per-AG space reservations.
	 */
	xfs_inodegc_flush(mp);

	xfs_blockgc_stop(mp);
	xfs_fs_unreserve_ag_blocks(mp);
	xfs_qm_unmount_quotas(mp);
	xfs_rtunmount_inodes(mp);
	xfs_irele(mp->m_rootip);

	xfs_unmount_flush_inodes(mp);

	xfs_qm_unmount(mp);

	/*
	 * Unreserve any blocks we have so that when we unmount we don't
	 * account the reserved free space as used.  Failure here is not
	 * fatal, but leaves the free space accounting slightly off for the
	 * next mount; warn and carry on.
	 */
	resblks = 0;
	error = xfs_reserve_blocks(mp, &resblks, NULL);
	if (error)
		xfs_warn(mp, "Unable to free reserved block pool. "
				"Freespace may not be correct on next mount.");

	xfs_log_unmount(mp);
	xfs_da_unmount(mp);
	xfs_uuid_unmount(mp);

#if defined(DEBUG)
	xfs_errortag_clearall(mp);
#endif
	unregister_shrinker(&mp->m_inodegc_shrinker);
	xfs_free_perag(mp);

	/* sysfs teardown, reverse of setup order in xfs_mountfs() */
	xfs_errortag_del(mp);
	xfs_error_sysfs_del(mp);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}
1094
1095
1096
1097
1098
1099
1100
1101 bool
1102 xfs_fs_writable(
1103 struct xfs_mount *mp,
1104 int level)
1105 {
1106 ASSERT(level > SB_UNFROZEN);
1107 if ((mp->m_super->s_writers.frozen >= level) ||
1108 xfs_is_shutdown(mp) || xfs_is_readonly(mp))
1109 return false;
1110
1111 return true;
1112 }
1113
1114
/*
 * Apply a delta to one of the free space counters (fdblocks or frextents).
 * Positive deltas refill the reserve pool first (fdblocks only); negative
 * deltas are applied optimistically via the percpu counter and undone
 * under the sb lock if they would take the counter below the set-aside,
 * dipping into the reserve pool when @rsvd is set.  Returns 0 or -ENOSPC.
 */
int
xfs_mod_freecounter(
	struct xfs_mount	*mp,
	struct percpu_counter	*counter,
	int64_t			delta,
	bool			rsvd)
{
	int64_t			lcounter;
	long long		res_used;
	uint64_t		set_aside = 0;
	s32			batch;
	bool			has_resv_pool;

	ASSERT(counter == &mp->m_fdblocks || counter == &mp->m_frextents);
	/* only the fdblocks counter has a reserve pool to draw from */
	has_resv_pool = (counter == &mp->m_fdblocks);
	if (rsvd)
		ASSERT(has_resv_pool);

	if (delta > 0) {
		/*
		 * If the reserve pool is depleted, put blocks back into it
		 * first.  Most of the time the pool is full, so avoid the
		 * sb lock in that common case.
		 */
		if (likely(!has_resv_pool ||
			   mp->m_resblks == mp->m_resblks_avail)) {
			percpu_counter_add(counter, delta);
			return 0;
		}

		spin_lock(&mp->m_sb_lock);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (res_used > delta) {
			/* the whole delta fits into the reserve pool */
			mp->m_resblks_avail += delta;
		} else {
			/* top up the pool, free counter takes the rest */
			delta -= res_used;
			mp->m_resblks_avail = mp->m_resblks;
			percpu_counter_add(counter, delta);
		}
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}

	/*
	 * Taking blocks away, need to be more accurate the closer we
	 * are to zero.
	 *
	 * If the counter has a value of less than 2 * max batch size,
	 * then make everything serialise as we are real close to
	 * ENOSPC.
	 */
	if (__percpu_counter_compare(counter, 2 * XFS_FDBLOCKS_BATCH,
				     XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;
	else
		batch = XFS_FDBLOCKS_BATCH;

	/*
	 * Set aside allocbt/metadata blocks (fdblocks only) because we
	 * cannot hand those out; then apply the delta optimistically and
	 * check whether we are still above the floor.
	 */
	if (has_resv_pool)
		set_aside = xfs_fdblocks_unavailable(mp);
	percpu_counter_add_batch(counter, delta, batch);
	if (__percpu_counter_compare(counter, set_aside,
				     XFS_FDBLOCKS_BATCH) >= 0) {
		/* we had space! */
		return 0;
	}

	/*
	 * Went below the floor: lock out other callers, undo the optimistic
	 * subtraction, and try the reserve pool if the caller may use it.
	 */
	spin_lock(&mp->m_sb_lock);
	percpu_counter_add(counter, -delta);
	if (!has_resv_pool || !rsvd)
		goto fdblocks_enospc;

	lcounter = (long long)mp->m_resblks_avail + delta;
	if (lcounter >= 0) {
		mp->m_resblks_avail = lcounter;
		spin_unlock(&mp->m_sb_lock);
		return 0;
	}
	xfs_warn_once(mp,
"Reserve blocks depleted! Consider increasing reserve pool size.");

fdblocks_enospc:
	spin_unlock(&mp->m_sb_lock);
	return -ENOSPC;
}
1214
1215
1216
1217
/*
 * Release the cached superblock buffer at unmount.  The buffer is kept
 * unlocked for the life of the mount (see xfs_readsb()), so lock it first;
 * xfs_buf_relse() then unlocks and drops the final reference.
 */
void
xfs_freesb(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp = mp->m_sb_bp;

	xfs_buf_lock(bp);
	mp->m_sb_bp = NULL;
	xfs_buf_relse(bp);
}
1228
1229
1230
1231
1232
1233 int
1234 xfs_dev_is_read_only(
1235 struct xfs_mount *mp,
1236 char *message)
1237 {
1238 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
1239 xfs_readonly_buftarg(mp->m_logdev_targp) ||
1240 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
1241 xfs_notice(mp, "%s required on read-only device.", message);
1242 xfs_notice(mp, "write access unavailable, cannot proceed.");
1243 return -EROFS;
1244 }
1245 return 0;
1246 }
1247
1248
1249 void
1250 xfs_force_summary_recalc(
1251 struct xfs_mount *mp)
1252 {
1253 if (!xfs_has_lazysbcount(mp))
1254 return;
1255
1256 xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
1257 }
1258
1259
1260
1261
1262
/*
 * Enable a log-incompat feature flag in the superblock.  The on-disk
 * superblock must be updated (and written synchronously) before any log
 * item using the feature hits the log, so older kernels can never replay
 * records they do not understand.
 */
int
xfs_add_incompat_log_feature(
	struct xfs_mount	*mp,
	uint32_t		feature)
{
	struct xfs_dsb		*dsb;
	int			error;

	/* exactly one known log-incompat bit at a time */
	ASSERT(hweight32(feature) == 1);
	ASSERT(!(feature & XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));

	/*
	 * Force the log to disk and push the AIL so that the new superblock
	 * (written below, outside a transaction) cannot race ahead of
	 * metadata still sitting in the log.
	 */
	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all(mp->m_ail);

	/*
	 * Lock the superblock buffer to serialize all callers that are
	 * trying to set feature bits; hold an extra reference across the
	 * bwrite below.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto rele;
	}

	/* another caller may have set the bit while we waited for the lock */
	if (xfs_sb_has_incompat_log_feature(&mp->m_sb, feature))
		goto rele;

	/*
	 * Write the new feature bit straight into the on-disk superblock.
	 * We deliberately do NOT set it in the in-core sb until the write
	 * succeeds, so failure leaves the in-core state consistent.
	 */
	dsb = mp->m_sb_bp->b_addr;
	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_features_log_incompat |= cpu_to_be32(feature);
	error = xfs_bwrite(mp->m_sb_bp);
	if (error)
		goto shutdown;

	/*
	 * On-disk copy is durable; now it is safe to add the bit to the
	 * in-core superblock.
	 */
	xfs_sb_add_incompat_log_features(&mp->m_sb, feature);
	xfs_buf_relse(mp->m_sb_bp);

	/* log the in-core superblock change normally */
	return xfs_sync_sb(mp, false);
shutdown:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
rele:
	xfs_buf_relse(mp->m_sb_bp);
	return error;
}
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
/*
 * Clear all the log-incompat feature flags from the in-core superblock.
 * Returns true if anything was cleared, in which case the caller is
 * responsible for logging the updated superblock.  Only applicable to
 * V5 (crc-enabled) filesystems that are not shut down.
 */
bool
xfs_clear_incompat_log_features(
	struct xfs_mount	*mp)
{
	bool			ret = false;

	if (!xfs_has_crc(mp) ||
	    !xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL) ||
	    xfs_is_shutdown(mp))
		return false;

	/*
	 * Take the sb buffer lock to serialize against concurrent attempts
	 * to set log-incompat bits (see xfs_add_incompat_log_feature()),
	 * then re-check under the lock before clearing.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_hold(mp->m_sb_bp);

	if (xfs_sb_has_incompat_log_feature(&mp->m_sb,
				XFS_SB_FEAT_INCOMPAT_LOG_ALL)) {
		xfs_sb_remove_incompat_log_features(&mp->m_sb);
		ret = true;
	}

	xfs_buf_relse(mp->m_sb_bp);
	return ret;
}
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
/* batch size for the delalloc percpu counter; accuracy is not critical */
#define XFS_DELALLOC_BATCH	(4096)
/*
 * Update the in-core count of delayed-allocation blocks.  Uses a large
 * batch because the counter is only consulted for heuristics, not for
 * exact space accounting.
 */
void
xfs_mod_delalloc(
	struct xfs_mount	*mp,
	int64_t			delta)
{
	percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
			XFS_DELALLOC_BATCH);
}