// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
0006 #include <linux/iversion.h>
0007
0008 #include "xfs.h"
0009 #include "xfs_fs.h"
0010 #include "xfs_shared.h"
0011 #include "xfs_format.h"
0012 #include "xfs_log_format.h"
0013 #include "xfs_trans_resv.h"
0014 #include "xfs_mount.h"
0015 #include "xfs_defer.h"
0016 #include "xfs_inode.h"
0017 #include "xfs_dir2.h"
0018 #include "xfs_attr.h"
0019 #include "xfs_trans_space.h"
0020 #include "xfs_trans.h"
0021 #include "xfs_buf_item.h"
0022 #include "xfs_inode_item.h"
0023 #include "xfs_iunlink_item.h"
0024 #include "xfs_ialloc.h"
0025 #include "xfs_bmap.h"
0026 #include "xfs_bmap_util.h"
0027 #include "xfs_errortag.h"
0028 #include "xfs_error.h"
0029 #include "xfs_quota.h"
0030 #include "xfs_filestream.h"
0031 #include "xfs_trace.h"
0032 #include "xfs_icache.h"
0033 #include "xfs_symlink.h"
0034 #include "xfs_trans_priv.h"
0035 #include "xfs_log.h"
0036 #include "xfs_bmap_btree.h"
0037 #include "xfs_reflink.h"
0038 #include "xfs_ag.h"
0039 #include "xfs_log_priv.h"
0040
0041 struct kmem_cache *xfs_inode_cache;
0042
0043
0044
0045
0046
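/*
 * Used in xfs_itruncate_extents(): the maximum number of extents to unmap
 * from a file in a single transaction, to keep each transaction bounded.
 */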
0047 #define XFS_ITRUNC_MAX_EXTENTS 2
0048
0049 STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
0050 STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
0051 struct xfs_inode *);
0052
0053
0054
0055
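/*
 * Return the extent size hint for this file.  Always-COW inodes never use
 * a hint; otherwise a per-inode extent size takes precedence over the
 * realtime extent size.
 */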
0056 xfs_extlen_t
0057 xfs_get_extsz_hint(
0058 struct xfs_inode *ip)
0059 {
0060
0061
0062
0063
0064 if (xfs_is_always_cow_inode(ip))
0065 return 0;
0066 if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
0067 return ip->i_extsize;
0068 if (XFS_IS_REALTIME_INODE(ip))
0069 return ip->i_mount->m_sb.sb_rextsize;
0070 return 0;
0071 }
0072
0073
0074
0075
0076
0077
0078
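/*
 * Return the CoW extent size hint: the larger of the explicit CoW hint and
 * the regular extent size hint, falling back to the filesystem default
 * when neither is set.
 */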
0079 xfs_extlen_t
0080 xfs_get_cowextsz_hint(
0081 struct xfs_inode *ip)
0082 {
0083 xfs_extlen_t a, b;
0084
0085 a = 0;
0086 if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
0087 a = ip->i_cowextsize;
0088 b = xfs_get_extsz_hint(ip);
0089
0090 a = max(a, b);
0091 if (a == 0)
0092 return XFS_DEFAULT_COWEXTSZ_HINT;
0093 return a;
0094 }
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
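/*
 * These two helpers pick the ILOCK mode needed to read the extent list of
 * the data or attr fork.  If the extents have not been read in yet, the
 * lock must be taken exclusively so the fork can be filled in.
 */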
0111 uint
0112 xfs_ilock_data_map_shared(
0113 struct xfs_inode *ip)
0114 {
0115 uint lock_mode = XFS_ILOCK_SHARED;
0116
0117 if (xfs_need_iread_extents(&ip->i_df))
0118 lock_mode = XFS_ILOCK_EXCL;
0119 xfs_ilock(ip, lock_mode);
0120 return lock_mode;
0121 }
0122
0123 uint
0124 xfs_ilock_attr_map_shared(
0125 struct xfs_inode *ip)
0126 {
0127 uint lock_mode = XFS_ILOCK_SHARED;
0128
0129 if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
0130 lock_mode = XFS_ILOCK_EXCL;
0131 xfs_ilock(ip, lock_mode);
0132 return lock_mode;
0133 }
0134
0135
0136
0137
0138
0139
0140
0141 static inline void
0142 xfs_lock_flags_assert(
0143 uint lock_flags)
0144 {
0145 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
0146 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
0147 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
0148 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
0149 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
0150 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
0151 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
0152 ASSERT(lock_flags != 0);
0153 }
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
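/*
 * xfs_ilock() takes the requested subset of the inode's three locks: the
 * IOLOCK (VFS i_rwsem), the MMAPLOCK (mapping invalidate_lock) and the
 * ILOCK (inode metadata lock).  When more than one class is requested they
 * are always acquired in that order, and callers must follow the same
 * ordering to avoid deadlocks.
 */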
0185 void
0186 xfs_ilock(
0187 xfs_inode_t *ip,
0188 uint lock_flags)
0189 {
0190 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
0191
0192 xfs_lock_flags_assert(lock_flags);
0193
0194 if (lock_flags & XFS_IOLOCK_EXCL) {
0195 down_write_nested(&VFS_I(ip)->i_rwsem,
0196 XFS_IOLOCK_DEP(lock_flags));
0197 } else if (lock_flags & XFS_IOLOCK_SHARED) {
0198 down_read_nested(&VFS_I(ip)->i_rwsem,
0199 XFS_IOLOCK_DEP(lock_flags));
0200 }
0201
0202 if (lock_flags & XFS_MMAPLOCK_EXCL) {
0203 down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
0204 XFS_MMAPLOCK_DEP(lock_flags));
0205 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
0206 down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
0207 XFS_MMAPLOCK_DEP(lock_flags));
0208 }
0209
0210 if (lock_flags & XFS_ILOCK_EXCL)
0211 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
0212 else if (lock_flags & XFS_ILOCK_SHARED)
0213 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
0214 }
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
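/*
 * Trylock variant of xfs_ilock().  Returns 1 if all requested locks were
 * acquired, 0 otherwise; any locks taken before the failing one are
 * dropped again on the way out.
 */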
0228 int
0229 xfs_ilock_nowait(
0230 xfs_inode_t *ip,
0231 uint lock_flags)
0232 {
0233 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
0234
0235 xfs_lock_flags_assert(lock_flags);
0236
0237 if (lock_flags & XFS_IOLOCK_EXCL) {
0238 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
0239 goto out;
0240 } else if (lock_flags & XFS_IOLOCK_SHARED) {
0241 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
0242 goto out;
0243 }
0244
0245 if (lock_flags & XFS_MMAPLOCK_EXCL) {
0246 if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
0247 goto out_undo_iolock;
0248 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
0249 if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
0250 goto out_undo_iolock;
0251 }
0252
0253 if (lock_flags & XFS_ILOCK_EXCL) {
0254 if (!mrtryupdate(&ip->i_lock))
0255 goto out_undo_mmaplock;
0256 } else if (lock_flags & XFS_ILOCK_SHARED) {
0257 if (!mrtryaccess(&ip->i_lock))
0258 goto out_undo_mmaplock;
0259 }
0260 return 1;
0261
0262 out_undo_mmaplock:
0263 if (lock_flags & XFS_MMAPLOCK_EXCL)
0264 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
0265 else if (lock_flags & XFS_MMAPLOCK_SHARED)
0266 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
0267 out_undo_iolock:
0268 if (lock_flags & XFS_IOLOCK_EXCL)
0269 up_write(&VFS_I(ip)->i_rwsem);
0270 else if (lock_flags & XFS_IOLOCK_SHARED)
0271 up_read(&VFS_I(ip)->i_rwsem);
0272 out:
0273 return 0;
0274 }
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
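/*
 * Drop the locks described by lock_flags.  The caller must hold exactly
 * the lock types being released.
 */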
0288 void
0289 xfs_iunlock(
0290 xfs_inode_t *ip,
0291 uint lock_flags)
0292 {
0293 xfs_lock_flags_assert(lock_flags);
0294
0295 if (lock_flags & XFS_IOLOCK_EXCL)
0296 up_write(&VFS_I(ip)->i_rwsem);
0297 else if (lock_flags & XFS_IOLOCK_SHARED)
0298 up_read(&VFS_I(ip)->i_rwsem);
0299
0300 if (lock_flags & XFS_MMAPLOCK_EXCL)
0301 up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
0302 else if (lock_flags & XFS_MMAPLOCK_SHARED)
0303 up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
0304
0305 if (lock_flags & XFS_ILOCK_EXCL)
0306 mrunlock_excl(&ip->i_lock);
0307 else if (lock_flags & XFS_ILOCK_SHARED)
0308 mrunlock_shared(&ip->i_lock);
0309
0310 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
0311 }
0312
0313
0314
0315
0316
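/*
 * Demote the named exclusive lock(s) to shared mode without dropping them.
 */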
0317 void
0318 xfs_ilock_demote(
0319 xfs_inode_t *ip,
0320 uint lock_flags)
0321 {
0322 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
0323 ASSERT((lock_flags &
0324 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
0325
0326 if (lock_flags & XFS_ILOCK_EXCL)
0327 mrdemote(&ip->i_lock);
0328 if (lock_flags & XFS_MMAPLOCK_EXCL)
0329 downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
0330 if (lock_flags & XFS_IOLOCK_EXCL)
0331 downgrade_write(&VFS_I(ip)->i_rwsem);
0332
0333 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
0334 }
0335
0336 #if defined(DEBUG) || defined(XFS_WARN)
0337 static inline bool
0338 __xfs_rwsem_islocked(
0339 struct rw_semaphore *rwsem,
0340 bool shared)
0341 {
0342 if (!debug_locks)
0343 return rwsem_is_locked(rwsem);
0344
0345 if (!shared)
0346 return lockdep_is_held_type(rwsem, 0);
0347
0348
0349
0350
0351
0352
0353
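	/*
	 * The caller only cares that the lock is held at least in shared
	 * mode, so ask lockdep whether the rwsem is held in any mode (-1)
	 * rather than specifically as a reader.
	 */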
0354 return lockdep_is_held_type(rwsem, -1);
0355 }
0356
0357 bool
0358 xfs_isilocked(
0359 struct xfs_inode *ip,
0360 uint lock_flags)
0361 {
0362 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
0363 if (!(lock_flags & XFS_ILOCK_SHARED))
0364 return !!ip->i_lock.mr_writer;
0365 return rwsem_is_locked(&ip->i_lock.mr_lock);
0366 }
0367
0368 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
0369 return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
0370 (lock_flags & XFS_MMAPLOCK_SHARED));
0371 }
0372
0373 if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
0374 return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
0375 (lock_flags & XFS_IOLOCK_SHARED));
0376 }
0377
0378 ASSERT(0);
0379 return false;
0380 }
0381 #endif
0382
0383
0384
0385
0386
0387
0388
0389 #if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
0390 static bool
0391 xfs_lockdep_subclass_ok(
0392 int subclass)
0393 {
0394 return subclass < MAX_LOCKDEP_SUBCLASSES;
0395 }
0396 #else
0397 #define xfs_lockdep_subclass_ok(subclass) (true)
0398 #endif
0399
0400
0401
0402
0403
0404
0405
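/*
 * Fold a lockdep subclass into the lock mode flags so that nested
 * acquisition of the same lock class (e.g. when locking several inodes
 * for a rename) gets a distinct lockdep annotation.
 */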
0406 static inline uint
0407 xfs_lock_inumorder(
0408 uint lock_mode,
0409 uint subclass)
0410 {
0411 uint class = 0;
0412
0413 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
0414 XFS_ILOCK_RTSUM)));
0415 ASSERT(xfs_lockdep_subclass_ok(subclass));
0416
0417 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
0418 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
0419 class += subclass << XFS_IOLOCK_SHIFT;
0420 }
0421
0422 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
0423 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
0424 class += subclass << XFS_MMAPLOCK_SHIFT;
0425 }
0426
0427 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
0428 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
0429 class += subclass << XFS_ILOCK_SHIFT;
0430 }
0431
0432 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
0433 }
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
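/*
 * Lock between two and five inodes with the same lock mode, in ascending
 * inode number order.  Once any already-locked inode in the set has its
 * log item in the AIL, the remaining inodes are only trylocked; on a
 * trylock failure everything is unlocked and the whole sequence retried,
 * so we never sleep on a lock while holding an inode the log may need.
 */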
0450 static void
0451 xfs_lock_inodes(
0452 struct xfs_inode **ips,
0453 int inodes,
0454 uint lock_mode)
0455 {
0456 int attempts = 0;
0457 uint i;
0458 int j;
0459 bool try_lock;
0460 struct xfs_log_item *lp;
0461
0462
0463
0464
0465
0466
0467
0468
0469 ASSERT(ips && inodes >= 2 && inodes <= 5);
0470 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
0471 XFS_ILOCK_EXCL));
0472 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
0473 XFS_ILOCK_SHARED)));
0474 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
0475 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
0476 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
0477 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
0478
0479 if (lock_mode & XFS_IOLOCK_EXCL) {
0480 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
0481 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
0482 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
0483
0484 again:
0485 try_lock = false;
0486 i = 0;
0487 for (; i < inodes; i++) {
0488 ASSERT(ips[i]);
0489
0490 if (i && (ips[i] == ips[i - 1]))
0491 continue;
0492
0493
0494
0495
0496
0497 if (!try_lock) {
0498 for (j = (i - 1); j >= 0 && !try_lock; j--) {
0499 lp = &ips[j]->i_itemp->ili_item;
0500 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
0501 try_lock = true;
0502 }
0503 }
0504
0505
0506
0507
0508
0509
0510
0511 if (!try_lock) {
0512 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
0513 continue;
0514 }
0515
0516
0517 ASSERT(i != 0);
0518 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
0519 continue;
0520
0521
0522
0523
0524
0525 attempts++;
0526 for (j = i - 1; j >= 0; j--) {
0527
0528
0529
0530
0531
0532 if (j != (i - 1) && ips[j] == ips[j + 1])
0533 continue;
0534
0535 xfs_iunlock(ips[j], lock_mode);
0536 }
0537
0538 if ((attempts % 5) == 0) {
0539 delay(1);
0540 }
0541 goto again;
0542 }
0543 }
0544
0545
0546
0547
0548
0549
0550
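/*
 * Lock two inodes in ascending inode number order with the given ILOCK or
 * MMAPLOCK modes.  IOLOCK and MMAPLOCK/IOLOCK combinations are rejected by
 * the asserts; callers take those locks separately.
 */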
0551 void
0552 xfs_lock_two_inodes(
0553 struct xfs_inode *ip0,
0554 uint ip0_mode,
0555 struct xfs_inode *ip1,
0556 uint ip1_mode)
0557 {
0558 int attempts = 0;
0559 struct xfs_log_item *lp;
0560
0561 ASSERT(hweight32(ip0_mode) == 1);
0562 ASSERT(hweight32(ip1_mode) == 1);
0563 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
0564 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
0565 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
0566 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
0567 ASSERT(ip0->i_ino != ip1->i_ino);
0568
0569 if (ip0->i_ino > ip1->i_ino) {
0570 swap(ip0, ip1);
0571 swap(ip0_mode, ip1_mode);
0572 }
0573
0574 again:
0575 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
0576
0577
0578
0579
0580
0581
0582 lp = &ip0->i_itemp->ili_item;
0583 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
0584 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
0585 xfs_iunlock(ip0, ip0_mode);
0586 if ((++attempts % 5) == 0)
0587 delay(1);
0588 goto again;
0589 }
0590 } else {
0591 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
0592 }
0593 }
0594
0595 uint
0596 xfs_ip2xflags(
0597 struct xfs_inode *ip)
0598 {
0599 uint flags = 0;
0600
0601 if (ip->i_diflags & XFS_DIFLAG_ANY) {
0602 if (ip->i_diflags & XFS_DIFLAG_REALTIME)
0603 flags |= FS_XFLAG_REALTIME;
0604 if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
0605 flags |= FS_XFLAG_PREALLOC;
0606 if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
0607 flags |= FS_XFLAG_IMMUTABLE;
0608 if (ip->i_diflags & XFS_DIFLAG_APPEND)
0609 flags |= FS_XFLAG_APPEND;
0610 if (ip->i_diflags & XFS_DIFLAG_SYNC)
0611 flags |= FS_XFLAG_SYNC;
0612 if (ip->i_diflags & XFS_DIFLAG_NOATIME)
0613 flags |= FS_XFLAG_NOATIME;
0614 if (ip->i_diflags & XFS_DIFLAG_NODUMP)
0615 flags |= FS_XFLAG_NODUMP;
0616 if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
0617 flags |= FS_XFLAG_RTINHERIT;
0618 if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
0619 flags |= FS_XFLAG_PROJINHERIT;
0620 if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
0621 flags |= FS_XFLAG_NOSYMLINKS;
0622 if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
0623 flags |= FS_XFLAG_EXTSIZE;
0624 if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
0625 flags |= FS_XFLAG_EXTSZINHERIT;
0626 if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
0627 flags |= FS_XFLAG_NODEFRAG;
0628 if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
0629 flags |= FS_XFLAG_FILESTREAM;
0630 }
0631
0632 if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
0633 if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
0634 flags |= FS_XFLAG_DAX;
0635 if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
0636 flags |= FS_XFLAG_COWEXTSIZE;
0637 }
0638
0639 if (xfs_inode_has_attr_fork(ip))
0640 flags |= FS_XFLAG_HASATTR;
0641 return flags;
0642 }
0643
0644
0645
0646
0647
0648
0649
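/*
 * Look up name in directory dp and return the corresponding inode in *ipp.
 * If ci_name is non-NULL, a case-insensitive match stores the actual
 * on-disk name there and the caller is responsible for freeing it.
 */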
0650 int
0651 xfs_lookup(
0652 struct xfs_inode *dp,
0653 const struct xfs_name *name,
0654 struct xfs_inode **ipp,
0655 struct xfs_name *ci_name)
0656 {
0657 xfs_ino_t inum;
0658 int error;
0659
0660 trace_xfs_lookup(dp, name);
0661
0662 if (xfs_is_shutdown(dp->i_mount))
0663 return -EIO;
0664
0665 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
0666 if (error)
0667 goto out_unlock;
0668
0669 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
0670 if (error)
0671 goto out_free_name;
0672
0673 return 0;
0674
0675 out_free_name:
0676 if (ci_name)
0677 kmem_free(ci_name->name);
0678 out_unlock:
0679 *ipp = NULL;
0680 return error;
0681 }
0682
0683
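/*
 * Propagate inheritable di_flags from the parent directory to a newly
 * created inode, then re-validate the inherited extent size hint and drop
 * it if it is not valid for the new file.
 */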
0684 static void
0685 xfs_inode_inherit_flags(
0686 struct xfs_inode *ip,
0687 const struct xfs_inode *pip)
0688 {
0689 unsigned int di_flags = 0;
0690 xfs_failaddr_t failaddr;
0691 umode_t mode = VFS_I(ip)->i_mode;
0692
0693 if (S_ISDIR(mode)) {
0694 if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
0695 di_flags |= XFS_DIFLAG_RTINHERIT;
0696 if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
0697 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
0698 ip->i_extsize = pip->i_extsize;
0699 }
0700 if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
0701 di_flags |= XFS_DIFLAG_PROJINHERIT;
0702 } else if (S_ISREG(mode)) {
0703 if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
0704 xfs_has_realtime(ip->i_mount))
0705 di_flags |= XFS_DIFLAG_REALTIME;
0706 if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
0707 di_flags |= XFS_DIFLAG_EXTSIZE;
0708 ip->i_extsize = pip->i_extsize;
0709 }
0710 }
0711 if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
0712 xfs_inherit_noatime)
0713 di_flags |= XFS_DIFLAG_NOATIME;
0714 if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
0715 xfs_inherit_nodump)
0716 di_flags |= XFS_DIFLAG_NODUMP;
0717 if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
0718 xfs_inherit_sync)
0719 di_flags |= XFS_DIFLAG_SYNC;
0720 if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
0721 xfs_inherit_nosymlinks)
0722 di_flags |= XFS_DIFLAG_NOSYMLINKS;
0723 if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
0724 xfs_inherit_nodefrag)
0725 di_flags |= XFS_DIFLAG_NODEFRAG;
0726 if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
0727 di_flags |= XFS_DIFLAG_FILESTREAM;
0728
0729 ip->i_diflags |= di_flags;
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741 failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
0742 VFS_I(ip)->i_mode, ip->i_diflags);
0743 if (failaddr) {
0744 ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
0745 XFS_DIFLAG_EXTSZINHERIT);
0746 ip->i_extsize = 0;
0747 }
0748 }
0749
0750
0751 static void
0752 xfs_inode_inherit_flags2(
0753 struct xfs_inode *ip,
0754 const struct xfs_inode *pip)
0755 {
0756 xfs_failaddr_t failaddr;
0757
0758 if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
0759 ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
0760 ip->i_cowextsize = pip->i_cowextsize;
0761 }
0762 if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
0763 ip->i_diflags2 |= XFS_DIFLAG2_DAX;
0764
0765
0766 failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
0767 VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
0768 if (failaddr) {
0769 ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
0770 ip->i_cowextsize = 0;
0771 }
0772 }
0773
0774
0775
0776
0777
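/*
 * Initialise a freshly allocated on-disk inode: owner, mode, timestamps,
 * fork format and any flags inherited from the parent.  The inode is
 * returned locked ILOCK_EXCL and joined to the transaction.
 */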
0778 int
0779 xfs_init_new_inode(
0780 struct user_namespace *mnt_userns,
0781 struct xfs_trans *tp,
0782 struct xfs_inode *pip,
0783 xfs_ino_t ino,
0784 umode_t mode,
0785 xfs_nlink_t nlink,
0786 dev_t rdev,
0787 prid_t prid,
0788 bool init_xattrs,
0789 struct xfs_inode **ipp)
0790 {
0791 struct inode *dir = pip ? VFS_I(pip) : NULL;
0792 struct xfs_mount *mp = tp->t_mountp;
0793 struct xfs_inode *ip;
0794 unsigned int flags;
0795 int error;
0796 struct timespec64 tv;
0797 struct inode *inode;
0798
0799
0800
0801
0802
0803
0804
0805
0806 if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
0807 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
0808 return -EFSCORRUPTED;
0809 }
0810
0811
0812
0813
0814
0815 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
0816 if (error)
0817 return error;
0818
0819 ASSERT(ip != NULL);
0820 inode = VFS_I(ip);
0821 set_nlink(inode, nlink);
0822 inode->i_rdev = rdev;
0823 ip->i_projid = prid;
0824
0825 if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
0826 inode_fsuid_set(inode, mnt_userns);
0827 inode->i_gid = dir->i_gid;
0828 inode->i_mode = mode;
0829 } else {
0830 inode_init_owner(mnt_userns, inode, dir, mode);
0831 }
0832
0833
0834
0835
0836
0837
0838 if (irix_sgid_inherit &&
0839 (inode->i_mode & S_ISGID) &&
0840 !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
0841 inode->i_mode &= ~S_ISGID;
0842
0843 ip->i_disk_size = 0;
0844 ip->i_df.if_nextents = 0;
0845 ASSERT(ip->i_nblocks == 0);
0846
0847 tv = current_time(inode);
0848 inode->i_mtime = tv;
0849 inode->i_atime = tv;
0850 inode->i_ctime = tv;
0851
0852 ip->i_extsize = 0;
0853 ip->i_diflags = 0;
0854
0855 if (xfs_has_v3inodes(mp)) {
0856 inode_set_iversion(inode, 1);
0857 ip->i_cowextsize = 0;
0858 ip->i_crtime = tv;
0859 }
0860
0861 flags = XFS_ILOG_CORE;
0862 switch (mode & S_IFMT) {
0863 case S_IFIFO:
0864 case S_IFCHR:
0865 case S_IFBLK:
0866 case S_IFSOCK:
0867 ip->i_df.if_format = XFS_DINODE_FMT_DEV;
0868 flags |= XFS_ILOG_DEV;
0869 break;
0870 case S_IFREG:
0871 case S_IFDIR:
0872 if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
0873 xfs_inode_inherit_flags(ip, pip);
0874 if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
0875 xfs_inode_inherit_flags2(ip, pip);
0876 fallthrough;
0877 case S_IFLNK:
0878 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
0879 ip->i_df.if_bytes = 0;
0880 ip->i_df.if_u1.if_root = NULL;
0881 break;
0882 default:
0883 ASSERT(0);
0884 }
0885
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895 if (init_xattrs && xfs_has_attr(mp)) {
0896 ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
0897 xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
0898 }
0899
0900
0901
0902
0903 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
0904 xfs_trans_log_inode(tp, ip, flags);
0905
0906
0907 xfs_setup_inode(ip);
0908
0909 *ipp = ip;
0910 return 0;
0911 }
0912
0913
0914
0915
0916
0917
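/*
 * Decrement the link count on an inode and log the change.  If the count
 * reaches zero, put the inode on the AGI unlinked list so it can be freed
 * when the last reference goes away.
 */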
0918 static int
0919 xfs_droplink(
0920 xfs_trans_t *tp,
0921 xfs_inode_t *ip)
0922 {
0923 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
0924
0925 drop_nlink(VFS_I(ip));
0926 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
0927
0928 if (VFS_I(ip)->i_nlink)
0929 return 0;
0930
0931 return xfs_iunlink(tp, ip);
0932 }
0933
0934
0935
0936
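/*
 * Increment the link count on an inode and log the change.
 */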
0937 static void
0938 xfs_bumplink(
0939 xfs_trans_t *tp,
0940 xfs_inode_t *ip)
0941 {
0942 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
0943
0944 inc_nlink(VFS_I(ip));
0945 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
0946 }
0947
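/*
 * Allocate a new inode on disk and create the directory entry for it.  If
 * the initial transaction reservation fails with ENOSPC, flush dirty
 * inodes once to convert outstanding delayed allocations and retry.
 */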
0948 int
0949 xfs_create(
0950 struct user_namespace *mnt_userns,
0951 xfs_inode_t *dp,
0952 struct xfs_name *name,
0953 umode_t mode,
0954 dev_t rdev,
0955 bool init_xattrs,
0956 xfs_inode_t **ipp)
0957 {
0958 int is_dir = S_ISDIR(mode);
0959 struct xfs_mount *mp = dp->i_mount;
0960 struct xfs_inode *ip = NULL;
0961 struct xfs_trans *tp = NULL;
0962 int error;
0963 bool unlock_dp_on_error = false;
0964 prid_t prid;
0965 struct xfs_dquot *udqp = NULL;
0966 struct xfs_dquot *gdqp = NULL;
0967 struct xfs_dquot *pdqp = NULL;
0968 struct xfs_trans_res *tres;
0969 uint resblks;
0970 xfs_ino_t ino;
0971
0972 trace_xfs_create(dp, name);
0973
0974 if (xfs_is_shutdown(mp))
0975 return -EIO;
0976
0977 prid = xfs_get_initial_prid(dp);
0978
0979
0980
0981
0982 error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
0983 mapped_fsgid(mnt_userns, &init_user_ns), prid,
0984 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
0985 &udqp, &gdqp, &pdqp);
0986 if (error)
0987 return error;
0988
0989 if (is_dir) {
0990 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
0991 tres = &M_RES(mp)->tr_mkdir;
0992 } else {
0993 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
0994 tres = &M_RES(mp)->tr_create;
0995 }
0996
0997
0998
0999
1000
1001
1002
1003 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1004 &tp);
1005 if (error == -ENOSPC) {
1006
1007 xfs_flush_inodes(mp);
1008 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
1009 resblks, &tp);
1010 }
1011 if (error)
1012 goto out_release_dquots;
1013
1014 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1015 unlock_dp_on_error = true;
1016
1017
1018
1019
1020
1021
1022 error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1023 if (!error)
1024 error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1025 is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
1026 if (error)
1027 goto out_trans_cancel;
1028
1029
1030
1031
1032
1033
1034
1035
1036 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1037 unlock_dp_on_error = false;
1038
1039 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1040 resblks - XFS_IALLOC_SPACE_RES(mp));
1041 if (error) {
1042 ASSERT(error != -ENOSPC);
1043 goto out_trans_cancel;
1044 }
1045 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1046 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1047
1048 if (is_dir) {
1049 error = xfs_dir_init(tp, ip, dp);
1050 if (error)
1051 goto out_trans_cancel;
1052
1053 xfs_bumplink(tp, dp);
1054 }
1055
1056
1057
1058
1059
1060
1061 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1062 xfs_trans_set_sync(tp);
1063
1064
1065
1066
1067
1068
1069 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1070
1071 error = xfs_trans_commit(tp);
1072 if (error)
1073 goto out_release_inode;
1074
1075 xfs_qm_dqrele(udqp);
1076 xfs_qm_dqrele(gdqp);
1077 xfs_qm_dqrele(pdqp);
1078
1079 *ipp = ip;
1080 return 0;
1081
1082 out_trans_cancel:
1083 xfs_trans_cancel(tp);
1084 out_release_inode:
1085
1086
1087
1088
1089
1090 if (ip) {
1091 xfs_finish_inode_setup(ip);
1092 xfs_irele(ip);
1093 }
1094 out_release_dquots:
1095 xfs_qm_dqrele(udqp);
1096 xfs_qm_dqrele(gdqp);
1097 xfs_qm_dqrele(pdqp);
1098
1099 if (unlock_dp_on_error)
1100 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1101 return error;
1102 }
1103
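/*
 * Create an unlinked temporary file (nlink 0) and immediately place it on
 * the AGI unlinked list.  Used for O_TMPFILE and for whiteout inodes in
 * RENAME_WHITEOUT.
 */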
1104 int
1105 xfs_create_tmpfile(
1106 struct user_namespace *mnt_userns,
1107 struct xfs_inode *dp,
1108 umode_t mode,
1109 struct xfs_inode **ipp)
1110 {
1111 struct xfs_mount *mp = dp->i_mount;
1112 struct xfs_inode *ip = NULL;
1113 struct xfs_trans *tp = NULL;
1114 int error;
1115 prid_t prid;
1116 struct xfs_dquot *udqp = NULL;
1117 struct xfs_dquot *gdqp = NULL;
1118 struct xfs_dquot *pdqp = NULL;
1119 struct xfs_trans_res *tres;
1120 uint resblks;
1121 xfs_ino_t ino;
1122
1123 if (xfs_is_shutdown(mp))
1124 return -EIO;
1125
1126 prid = xfs_get_initial_prid(dp);
1127
1128
1129
1130
1131 error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
1132 mapped_fsgid(mnt_userns, &init_user_ns), prid,
1133 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1134 &udqp, &gdqp, &pdqp);
1135 if (error)
1136 return error;
1137
1138 resblks = XFS_IALLOC_SPACE_RES(mp);
1139 tres = &M_RES(mp)->tr_create_tmpfile;
1140
1141 error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
1142 &tp);
1143 if (error)
1144 goto out_release_dquots;
1145
1146 error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
1147 if (!error)
1148 error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
1149 0, 0, prid, false, &ip);
1150 if (error)
1151 goto out_trans_cancel;
1152
1153 if (xfs_has_wsync(mp))
1154 xfs_trans_set_sync(tp);
1155
1156
1157
1158
1159
1160
1161 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1162
1163 error = xfs_iunlink(tp, ip);
1164 if (error)
1165 goto out_trans_cancel;
1166
1167 error = xfs_trans_commit(tp);
1168 if (error)
1169 goto out_release_inode;
1170
1171 xfs_qm_dqrele(udqp);
1172 xfs_qm_dqrele(gdqp);
1173 xfs_qm_dqrele(pdqp);
1174
1175 *ipp = ip;
1176 return 0;
1177
1178 out_trans_cancel:
1179 xfs_trans_cancel(tp);
1180 out_release_inode:
1181
1182
1183
1184
1185
1186 if (ip) {
1187 xfs_finish_inode_setup(ip);
1188 xfs_irele(ip);
1189 }
1190 out_release_dquots:
1191 xfs_qm_dqrele(udqp);
1192 xfs_qm_dqrele(gdqp);
1193 xfs_qm_dqrele(pdqp);
1194
1195 return error;
1196 }
1197
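/*
 * Create a new hard link to sip in directory tdp.  If the source inode
 * currently has a zero link count (an O_TMPFILE being linked in), it is
 * first removed from the unlinked list.
 */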
1198 int
1199 xfs_link(
1200 xfs_inode_t *tdp,
1201 xfs_inode_t *sip,
1202 struct xfs_name *target_name)
1203 {
1204 xfs_mount_t *mp = tdp->i_mount;
1205 xfs_trans_t *tp;
1206 int error, nospace_error = 0;
1207 int resblks;
1208
1209 trace_xfs_link(tdp, target_name);
1210
1211 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1212
1213 if (xfs_is_shutdown(mp))
1214 return -EIO;
1215
1216 error = xfs_qm_dqattach(sip);
1217 if (error)
1218 goto std_return;
1219
1220 error = xfs_qm_dqattach(tdp);
1221 if (error)
1222 goto std_return;
1223
1224 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1225 error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
1226 &tp, &nospace_error);
1227 if (error)
1228 goto std_return;
1229
1230
1231
1232
1233
1234
1235 if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
1236 tdp->i_projid != sip->i_projid)) {
1237 error = -EXDEV;
1238 goto error_return;
1239 }
1240
1241 if (!resblks) {
1242 error = xfs_dir_canenter(tp, tdp, target_name);
1243 if (error)
1244 goto error_return;
1245 }
1246
1247
1248
1249
1250 if (VFS_I(sip)->i_nlink == 0) {
1251 struct xfs_perag *pag;
1252
1253 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
1254 error = xfs_iunlink_remove(tp, pag, sip);
1255 xfs_perag_put(pag);
1256 if (error)
1257 goto error_return;
1258 }
1259
1260 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1261 resblks);
1262 if (error)
1263 goto error_return;
1264 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1265 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1266
1267 xfs_bumplink(tp, sip);
1268
1269
1270
1271
1272
1273
1274 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1275 xfs_trans_set_sync(tp);
1276
1277 return xfs_trans_commit(tp);
1278
1279 error_return:
1280 xfs_trans_cancel(tp);
1281 std_return:
1282 if (error == -ENOSPC && nospace_error)
1283 error = nospace_error;
1284 return error;
1285 }
1286
1287
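/*
 * Clear the reflink flag once both the data and CoW forks are empty, and
 * drop the cowblocks tag when the CoW fork no longer has any extents.
 */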
1288 static void
1289 xfs_itruncate_clear_reflink_flags(
1290 struct xfs_inode *ip)
1291 {
1292 struct xfs_ifork *dfork;
1293 struct xfs_ifork *cfork;
1294
1295 if (!xfs_is_reflink_inode(ip))
1296 return;
1297 dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1298 cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
1299 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1300 ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1301 if (cfork->if_bytes == 0)
1302 xfs_inode_clear_cowblocks_tag(ip);
1303 }
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
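/*
 * Free extents in the given fork beyond new_size.  The caller holds the
 * ILOCK and a permanent transaction reservation; the transaction may be
 * rolled several times, so *tpp is updated to the current transaction on
 * return.  The in-core and on-disk file sizes are not changed here.
 */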
1326 int
1327 xfs_itruncate_extents_flags(
1328 struct xfs_trans **tpp,
1329 struct xfs_inode *ip,
1330 int whichfork,
1331 xfs_fsize_t new_size,
1332 int flags)
1333 {
1334 struct xfs_mount *mp = ip->i_mount;
1335 struct xfs_trans *tp = *tpp;
1336 xfs_fileoff_t first_unmap_block;
1337 xfs_filblks_t unmap_len;
1338 int error = 0;
1339
1340 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1341 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1342 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1343 ASSERT(new_size <= XFS_ISIZE(ip));
1344 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1345 ASSERT(ip->i_itemp != NULL);
1346 ASSERT(ip->i_itemp->ili_lock_flags == 0);
1347 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1348
1349 trace_xfs_itruncate_extents_start(ip, new_size);
1350
1351 flags |= xfs_bmapi_aflag(whichfork);
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1364 if (!xfs_verify_fileoff(mp, first_unmap_block)) {
1365 WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1366 return 0;
1367 }
1368
1369 unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1370 while (unmap_len > 0) {
1371 ASSERT(tp->t_firstblock == NULLFSBLOCK);
1372 error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1373 flags, XFS_ITRUNC_MAX_EXTENTS);
1374 if (error)
1375 goto out;
1376
1377
1378 error = xfs_defer_finish(&tp);
1379 if (error)
1380 goto out;
1381 }
1382
1383 if (whichfork == XFS_DATA_FORK) {
1384
1385 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1386 first_unmap_block, XFS_MAX_FILEOFF, true);
1387 if (error)
1388 goto out;
1389
1390 xfs_itruncate_clear_reflink_flags(ip);
1391 }
1392
1393
1394
1395
1396
1397 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1398
1399 trace_xfs_itruncate_extents_end(ip, new_size);
1400
1401 out:
1402 *tpp = tp;
1403 return error;
1404 }
1405
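/*
 * Called when a file descriptor referencing a regular file is released.
 * This is where speculative preallocation beyond EOF is trimmed back if
 * the blocks are no longer needed and the inode is not busy.
 */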
1406 int
1407 xfs_release(
1408 xfs_inode_t *ip)
1409 {
1410 xfs_mount_t *mp = ip->i_mount;
1411 int error = 0;
1412
1413 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1414 return 0;
1415
1416
1417 if (xfs_is_readonly(mp))
1418 return 0;
1419
1420 if (!xfs_is_shutdown(mp)) {
1421 int truncated;
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1434 if (truncated) {
1435 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1436 if (ip->i_delayed_blks > 0) {
1437 error = filemap_flush(VFS_I(ip)->i_mapping);
1438 if (error)
1439 return error;
1440 }
1441 }
1442 }
1443
1444 if (VFS_I(ip)->i_nlink == 0)
1445 return 0;
1446
1447
1448
1449
1450
1451
1452
1453 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1454 return 0;
1455
1456 if (xfs_can_free_eofblocks(ip, false)) {
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1472 goto out_unlock;
1473
1474 error = xfs_free_eofblocks(ip);
1475 if (error)
1476 goto out_unlock;
1477
1478
1479 if (ip->i_delayed_blks)
1480 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1481 }
1482
1483 out_unlock:
1484 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1485 return error;
1486 }
1487
1488
1489
1490
1491
1492
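/*
 * Truncate a file that is being inactivated down to zero length, freeing
 * all of its data fork extents.
 */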
1493 STATIC int
1494 xfs_inactive_truncate(
1495 struct xfs_inode *ip)
1496 {
1497 struct xfs_mount *mp = ip->i_mount;
1498 struct xfs_trans *tp;
1499 int error;
1500
1501 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1502 if (error) {
1503 ASSERT(xfs_is_shutdown(mp));
1504 return error;
1505 }
1506 xfs_ilock(ip, XFS_ILOCK_EXCL);
1507 xfs_trans_ijoin(tp, ip, 0);
1508
1509
1510
1511
1512
1513
1514 ip->i_disk_size = 0;
1515 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1516
1517 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1518 if (error)
1519 goto error_trans_cancel;
1520
1521 ASSERT(ip->i_df.if_nextents == 0);
1522
1523 error = xfs_trans_commit(tp);
1524 if (error)
1525 goto error_unlock;
1526
1527 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1528 return 0;
1529
1530 error_trans_cancel:
1531 xfs_trans_cancel(tp);
1532 error_unlock:
1533 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1534 return error;
1535 }
1536
1537
1538
1539
1540
1541
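/*
 * Perform the final ifree of an unlinked inode, using a block reservation
 * only when the free inode btree may need one, and shutting down the
 * filesystem if the inode cannot be freed.
 */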
1542 STATIC int
1543 xfs_inactive_ifree(
1544 struct xfs_inode *ip)
1545 {
1546 struct xfs_mount *mp = ip->i_mount;
1547 struct xfs_trans *tp;
1548 int error;
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 if (unlikely(mp->m_finobt_nores)) {
1562 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1563 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1564 &tp);
1565 } else {
1566 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1567 }
1568 if (error) {
1569 if (error == -ENOSPC) {
1570 xfs_warn_ratelimited(mp,
1571 "Failed to remove inode(s) from unlinked list. "
1572 "Please free space, unmount and run xfs_repair.");
1573 } else {
1574 ASSERT(xfs_is_shutdown(mp));
1575 }
1576 return error;
1577 }
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599 xfs_ilock(ip, XFS_ILOCK_EXCL);
1600 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1601
1602 error = xfs_ifree(tp, ip);
1603 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1604 if (error) {
1605
1606
1607
1608
1609
1610 if (!xfs_is_shutdown(mp)) {
1611 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1612 __func__, error);
1613 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1614 }
1615 xfs_trans_cancel(tp);
1616 return error;
1617 }
1618
1619
1620
1621
1622 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1623
1624
1625
1626
1627
1628 error = xfs_trans_commit(tp);
1629 if (error)
1630 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1631 __func__, error);
1632
1633 return 0;
1634 }
1635
1636
1637
1638
1639
1640
1641
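/*
 * Decide whether an inode needs to go through xfs_inactive() when its last
 * reference is dropped: unlinked inodes, inodes with CoW fork blocks and
 * inodes with freeable post-EOF blocks all need inactivation work.
 */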
1642 bool
1643 xfs_inode_needs_inactive(
1644 struct xfs_inode *ip)
1645 {
1646 struct xfs_mount *mp = ip->i_mount;
1647 struct xfs_ifork *cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1648
1649
1650
1651
1652
1653 if (VFS_I(ip)->i_mode == 0)
1654 return false;
1655
1656
1657 if (xfs_is_readonly(mp))
1658 return false;
1659
1660
1661 if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1662 return false;
1663
1664
1665 if (xfs_is_metadata_inode(ip))
1666 return false;
1667
1668
1669 if (cow_ifp && cow_ifp->if_bytes > 0)
1670 return true;
1671
1672
1673 if (VFS_I(ip)->i_nlink == 0)
1674 return true;
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686 return xfs_can_free_eofblocks(ip, true);
1687 }
1688
1689
1690
1691
1692
1693
1694
1695
1696
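/*
 * xfs_inactive() is called when the VFS tears down an inode with no more
 * references.  Linked inodes just have post-EOF blocks trimmed; unlinked
 * inodes have the data fork truncated, the attr fork removed and the
 * inode itself freed.
 */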
1697 void
1698 xfs_inactive(
1699 xfs_inode_t *ip)
1700 {
1701 struct xfs_mount *mp;
1702 int error;
1703 int truncate = 0;
1704
1705
1706
1707
1708
1709 if (VFS_I(ip)->i_mode == 0) {
1710 ASSERT(ip->i_df.if_broot_bytes == 0);
1711 goto out;
1712 }
1713
1714 mp = ip->i_mount;
1715 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1716
1717
1718 if (xfs_is_readonly(mp))
1719 goto out;
1720
1721
1722 if (xfs_is_metadata_inode(ip))
1723 goto out;
1724
1725
1726 if (xfs_inode_has_cow_data(ip))
1727 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1728
1729 if (VFS_I(ip)->i_nlink != 0) {
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739 if (xfs_can_free_eofblocks(ip, true))
1740 xfs_free_eofblocks(ip);
1741
1742 goto out;
1743 }
1744
1745 if (S_ISREG(VFS_I(ip)->i_mode) &&
1746 (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1747 ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1748 truncate = 1;
1749
1750 error = xfs_qm_dqattach(ip);
1751 if (error)
1752 goto out;
1753
1754 if (S_ISLNK(VFS_I(ip)->i_mode))
1755 error = xfs_inactive_symlink(ip);
1756 else if (truncate)
1757 error = xfs_inactive_truncate(ip);
1758 if (error)
1759 goto out;
1760
1761
1762
1763
1764
1765
1766 if (xfs_inode_has_attr_fork(ip)) {
1767 error = xfs_attr_inactive(ip);
1768 if (error)
1769 goto out;
1770 }
1771
1772 ASSERT(ip->i_forkoff == 0);
1773
1774
1775
1776
1777 xfs_inactive_ifree(ip);
1778
1779 out:
1780
1781
1782
1783
1784 xfs_qm_dqdetach(ip);
1785 }
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
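/*
 * The on-disk AGI unlinked buckets are singly linked lists of inodes that
 * are unlinked but still referenced, or are waiting for inactivation.  In
 * memory we also keep a back pointer (i_prev_unlinked) so an inode can be
 * removed from the middle of a bucket without walking the whole list.
 * The helpers below keep both views consistent.
 */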
1826 static struct xfs_inode *
1827 xfs_iunlink_lookup(
1828 struct xfs_perag *pag,
1829 xfs_agino_t agino)
1830 {
1831 struct xfs_inode *ip;
1832
1833 rcu_read_lock();
1834 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1835
1836
1837
1838
1839
1840 if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
1841 rcu_read_unlock();
1842 return NULL;
1843 }
1844 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1845 rcu_read_unlock();
1846 return ip;
1847 }
1848
1849
1850 static int
1851 xfs_iunlink_update_backref(
1852 struct xfs_perag *pag,
1853 xfs_agino_t prev_agino,
1854 xfs_agino_t next_agino)
1855 {
1856 struct xfs_inode *ip;
1857
1858
1859 if (next_agino == NULLAGINO)
1860 return 0;
1861
1862 ip = xfs_iunlink_lookup(pag, next_agino);
1863 if (!ip)
1864 return -EFSCORRUPTED;
1865 ip->i_prev_unlinked = prev_agino;
1866 return 0;
1867 }
1868
1869
1870
1871
1872
1873 STATIC int
1874 xfs_iunlink_update_bucket(
1875 struct xfs_trans *tp,
1876 struct xfs_perag *pag,
1877 struct xfs_buf *agibp,
1878 unsigned int bucket_index,
1879 xfs_agino_t new_agino)
1880 {
1881 struct xfs_agi *agi = agibp->b_addr;
1882 xfs_agino_t old_value;
1883 int offset;
1884
1885 ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1886
1887 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1888 trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1889 old_value, new_agino);
1890
1891
1892
1893
1894
1895
1896 if (old_value == new_agino) {
1897 xfs_buf_mark_corrupt(agibp);
1898 return -EFSCORRUPTED;
1899 }
1900
1901 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1902 offset = offsetof(struct xfs_agi, agi_unlinked) +
1903 (sizeof(xfs_agino_t) * bucket_index);
1904 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1905 return 0;
1906 }
1907
1908 static int
1909 xfs_iunlink_insert_inode(
1910 struct xfs_trans *tp,
1911 struct xfs_perag *pag,
1912 struct xfs_buf *agibp,
1913 struct xfs_inode *ip)
1914 {
1915 struct xfs_mount *mp = tp->t_mountp;
1916 struct xfs_agi *agi = agibp->b_addr;
1917 xfs_agino_t next_agino;
1918 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1919 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1920 int error;
1921
1922
1923
1924
1925
1926
1927 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1928 if (next_agino == agino ||
1929 !xfs_verify_agino_or_null(pag, next_agino)) {
1930 xfs_buf_mark_corrupt(agibp);
1931 return -EFSCORRUPTED;
1932 }
1933
1934
1935
1936
1937
1938 error = xfs_iunlink_update_backref(pag, agino, next_agino);
1939 if (error)
1940 return error;
1941
1942 if (next_agino != NULLAGINO) {
1943
1944
1945
1946
1947 error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
1948 if (error)
1949 return error;
1950 ip->i_next_unlinked = next_agino;
1951 }
1952
1953
1954 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
1955 }
1956
1957
1958
1959
1960
1961
1962
1963
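/*
 * Insert the inode at the head of its AGI bucket's unlinked list.  This is
 * called when the link count drops to zero while the inode is still open,
 * and for freshly created temporary files.
 */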
1964 STATIC int
1965 xfs_iunlink(
1966 struct xfs_trans *tp,
1967 struct xfs_inode *ip)
1968 {
1969 struct xfs_mount *mp = tp->t_mountp;
1970 struct xfs_perag *pag;
1971 struct xfs_buf *agibp;
1972 int error;
1973
1974 ASSERT(VFS_I(ip)->i_nlink == 0);
1975 ASSERT(VFS_I(ip)->i_mode != 0);
1976 trace_xfs_iunlink(ip);
1977
1978 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1979
1980
1981 error = xfs_read_agi(pag, tp, &agibp);
1982 if (error)
1983 goto out;
1984
1985 error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
1986 out:
1987 xfs_perag_put(pag);
1988 return error;
1989 }
1990
1991 static int
1992 xfs_iunlink_remove_inode(
1993 struct xfs_trans *tp,
1994 struct xfs_perag *pag,
1995 struct xfs_buf *agibp,
1996 struct xfs_inode *ip)
1997 {
1998 struct xfs_mount *mp = tp->t_mountp;
1999 struct xfs_agi *agi = agibp->b_addr;
2000 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2001 xfs_agino_t head_agino;
2002 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2003 int error;
2004
2005 trace_xfs_iunlink_remove(ip);
2006
2007
2008
2009
2010
2011 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2012 if (!xfs_verify_agino(pag, head_agino)) {
2013 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2014 agi, sizeof(*agi));
2015 return -EFSCORRUPTED;
2016 }
2017
2018
2019
2020
2021
2022
2023 error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2024 if (error)
2025 return error;
2026
2027
2028
2029
2030
2031 error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2032 ip->i_next_unlinked);
2033 if (error)
2034 return error;
2035
2036 if (head_agino != agino) {
2037 struct xfs_inode *prev_ip;
2038
2039 prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2040 if (!prev_ip)
2041 return -EFSCORRUPTED;
2042
2043 error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2044 ip->i_next_unlinked);
2045 prev_ip->i_next_unlinked = ip->i_next_unlinked;
2046 } else {
2047
2048 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2049 ip->i_next_unlinked);
2050 }
2051
2052 ip->i_next_unlinked = NULLAGINO;
2053 ip->i_prev_unlinked = NULLAGINO;
2054 return error;
2055 }
2056
2057
2058
2059
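/*
 * Pull the inode off the AGI unlinked list, rejoining its neighbours via
 * the in-memory back pointers.
 */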
2060 STATIC int
2061 xfs_iunlink_remove(
2062 struct xfs_trans *tp,
2063 struct xfs_perag *pag,
2064 struct xfs_inode *ip)
2065 {
2066 struct xfs_buf *agibp;
2067 int error;
2068
2069 trace_xfs_iunlink_remove(ip);
2070
2071
2072 error = xfs_read_agi(pag, tp, &agibp);
2073 if (error)
2074 return error;
2075
2076 return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2077 }
2078
2079
2080
2081
2082
2083
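/*
 * Mark an in-core inode belonging to a cluster that is being freed as
 * XFS_ISTALE so that it is never written back to the soon-to-be
 * invalidated inode cluster buffer.
 */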
2084 static void
2085 xfs_ifree_mark_inode_stale(
2086 struct xfs_perag *pag,
2087 struct xfs_inode *free_ip,
2088 xfs_ino_t inum)
2089 {
2090 struct xfs_mount *mp = pag->pag_mount;
2091 struct xfs_inode_log_item *iip;
2092 struct xfs_inode *ip;
2093
2094 retry:
2095 rcu_read_lock();
2096 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2097
2098
2099 if (!ip) {
2100 rcu_read_unlock();
2101 return;
2102 }
2103
2104
2105
2106
2107
2108
2109
2110 spin_lock(&ip->i_flags_lock);
2111 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2112 goto out_iflags_unlock;
2113
2114
2115
2116
2117
2118
2119
2120 if (ip != free_ip) {
2121 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2122 spin_unlock(&ip->i_flags_lock);
2123 rcu_read_unlock();
2124 delay(1);
2125 goto retry;
2126 }
2127 }
2128 ip->i_flags |= XFS_ISTALE;
2129
2130
2131
2132
2133
2134
2135 iip = ip->i_itemp;
2136 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2137 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2138 ASSERT(iip->ili_last_fields);
2139 goto out_iunlock;
2140 }
2141
2142
2143
2144
2145
2146
2147
2148 if (!iip || list_empty(&iip->ili_item.li_bio_list))
2149 goto out_iunlock;
2150
2151 __xfs_iflags_set(ip, XFS_IFLUSHING);
2152 spin_unlock(&ip->i_flags_lock);
2153 rcu_read_unlock();
2154
2155
2156 spin_lock(&iip->ili_lock);
2157 iip->ili_last_fields = iip->ili_fields;
2158 iip->ili_fields = 0;
2159 iip->ili_fsync_fields = 0;
2160 spin_unlock(&iip->ili_lock);
2161 ASSERT(iip->ili_last_fields);
2162
2163 if (ip != free_ip)
2164 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2165 return;
2166
2167 out_iunlock:
2168 if (ip != free_ip)
2169 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2170 out_iflags_unlock:
2171 spin_unlock(&ip->i_flags_lock);
2172 rcu_read_unlock();
2173 }
2174
2175
2176
2177
2178
2179
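/*
 * When an inode chunk is freed, invalidate the cluster buffer(s) backing
 * it and stale every in-core inode in the chunk so nothing tries to flush
 * them to disk afterwards.
 */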
2180 static int
2181 xfs_ifree_cluster(
2182 struct xfs_trans *tp,
2183 struct xfs_perag *pag,
2184 struct xfs_inode *free_ip,
2185 struct xfs_icluster *xic)
2186 {
2187 struct xfs_mount *mp = free_ip->i_mount;
2188 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2189 struct xfs_buf *bp;
2190 xfs_daddr_t blkno;
2191 xfs_ino_t inum = xic->first_ino;
2192 int nbufs;
2193 int i, j;
2194 int ioffset;
2195 int error;
2196
2197 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2198
2199 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2200
2201
2202
2203
2204
2205 ioffset = inum - xic->first_ino;
2206 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2207 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2208 continue;
2209 }
2210
2211 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2212 XFS_INO_TO_AGBNO(mp, inum));
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2224 mp->m_bsize * igeo->blocks_per_cluster,
2225 XBF_UNMAPPED, &bp);
2226 if (error)
2227 return error;
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238 bp->b_ops = &xfs_inode_buf_ops;
2239
2240
2241
2242
2243
2244
2245 for (i = 0; i < igeo->inodes_per_cluster; i++)
2246 xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2247
2248 xfs_trans_stale_inode_buf(tp, bp);
2249 xfs_trans_binval(tp, bp);
2250 }
2251 return 0;
2252 }
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
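/*
 * Free the on-disk inode: mark it free in the inode btrees, remove it from
 * the unlinked list, reset the in-core fields and, if this emptied the
 * inode chunk, stale the whole inode cluster.
 */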
2263 int
2264 xfs_ifree(
2265 struct xfs_trans *tp,
2266 struct xfs_inode *ip)
2267 {
2268 struct xfs_mount *mp = ip->i_mount;
2269 struct xfs_perag *pag;
2270 struct xfs_icluster xic = { 0 };
2271 struct xfs_inode_log_item *iip = ip->i_itemp;
2272 int error;
2273
2274 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2275 ASSERT(VFS_I(ip)->i_nlink == 0);
2276 ASSERT(ip->i_df.if_nextents == 0);
2277 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2278 ASSERT(ip->i_nblocks == 0);
2279
2280 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2281
2282
2283
2284
2285
2286
2287
2288 error = xfs_difree(tp, pag, ip->i_ino, &xic);
2289 if (error)
2290 goto out;
2291
2292 error = xfs_iunlink_remove(tp, pag, ip);
2293 if (error)
2294 goto out;
2295
2296
2297
2298
2299
2300
2301 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2302 kmem_free(ip->i_df.if_u1.if_data);
2303 ip->i_df.if_u1.if_data = NULL;
2304 ip->i_df.if_bytes = 0;
2305 }
2306
2307 VFS_I(ip)->i_mode = 0;
2308 ip->i_diflags = 0;
2309 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2310 ip->i_forkoff = 0;
2311 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2312 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2313 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2314
2315
2316 spin_lock(&iip->ili_lock);
2317 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2318 spin_unlock(&iip->ili_lock);
2319
2320
2321
2322
2323
2324 VFS_I(ip)->i_generation++;
2325 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2326
2327 if (xic.deleted)
2328 error = xfs_ifree_cluster(tp, pag, ip, &xic);
2329 out:
2330 xfs_perag_put(pag);
2331 return error;
2332 }
2333
2334
2335
2336
2337
2338
2339 static void
2340 xfs_iunpin(
2341 struct xfs_inode *ip)
2342 {
2343 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2344
2345 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2346
2347
2348 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2349
2350 }
2351
2352 static void
2353 __xfs_iunpin_wait(
2354 struct xfs_inode *ip)
2355 {
2356 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2357 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2358
2359 xfs_iunpin(ip);
2360
2361 do {
2362 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2363 if (xfs_ipincount(ip))
2364 io_schedule();
2365 } while (xfs_ipincount(ip));
2366 finish_wait(wq, &wait.wq_entry);
2367 }
2368
2369 void
2370 xfs_iunpin_wait(
2371 struct xfs_inode *ip)
2372 {
2373 if (xfs_ipincount(ip))
2374 __xfs_iunpin_wait(ip);
2375 }
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
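/*
 * Remove a directory entry.  For a directory victim this verifies that it
 * is empty and drops the extra link counts held by "." and the parent's
 * reference; the inode itself is freed later through xfs_inactive() once
 * the last reference goes away.
 */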
2404 int
2405 xfs_remove(
2406 xfs_inode_t *dp,
2407 struct xfs_name *name,
2408 xfs_inode_t *ip)
2409 {
2410 xfs_mount_t *mp = dp->i_mount;
2411 xfs_trans_t *tp = NULL;
2412 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2413 int dontcare;
2414 int error = 0;
2415 uint resblks;
2416
2417 trace_xfs_remove(dp, name);
2418
2419 if (xfs_is_shutdown(mp))
2420 return -EIO;
2421
2422 error = xfs_qm_dqattach(dp);
2423 if (error)
2424 goto std_return;
2425
2426 error = xfs_qm_dqattach(ip);
2427 if (error)
2428 goto std_return;
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441 resblks = XFS_REMOVE_SPACE_RES(mp);
2442 error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2443 &tp, &dontcare);
2444 if (error) {
2445 ASSERT(error != -ENOSPC);
2446 goto std_return;
2447 }
2448
2449
2450
2451
2452 if (is_dir) {
2453 ASSERT(VFS_I(ip)->i_nlink >= 2);
2454 if (VFS_I(ip)->i_nlink != 2) {
2455 error = -ENOTEMPTY;
2456 goto out_trans_cancel;
2457 }
2458 if (!xfs_dir_isempty(ip)) {
2459 error = -ENOTEMPTY;
2460 goto out_trans_cancel;
2461 }
2462
2463
2464 error = xfs_droplink(tp, dp);
2465 if (error)
2466 goto out_trans_cancel;
2467
2468
2469 error = xfs_droplink(tp, ip);
2470 if (error)
2471 goto out_trans_cancel;
2472
2473
2474
2475
2476
2477
2478
2479 if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2480 error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2481 tp->t_mountp->m_sb.sb_rootino, 0);
2482 if (error)
				goto out_trans_cancel;
2484 }
2485 } else {
2486
2487
2488
2489
2490
2491 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2492 }
2493 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2494
2495
2496 error = xfs_droplink(tp, ip);
2497 if (error)
2498 goto out_trans_cancel;
2499
2500 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2501 if (error) {
2502 ASSERT(error != -ENOENT);
2503 goto out_trans_cancel;
2504 }
2505
2506
2507
2508
2509
2510
2511 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2512 xfs_trans_set_sync(tp);
2513
2514 error = xfs_trans_commit(tp);
2515 if (error)
2516 goto std_return;
2517
2518 if (is_dir && xfs_inode_is_filestream(ip))
2519 xfs_filestream_deassociate(ip);
2520
2521 return 0;
2522
2523 out_trans_cancel:
2524 xfs_trans_cancel(tp);
2525 std_return:
2526 return error;
2527 }
2528
2529
2530
2531
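/*
 * Enter all inodes involved in a rename into a sorted array so they can be
 * locked in ascending inode number order.
 */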
2532 #define __XFS_SORT_INODES 5
2533 STATIC void
2534 xfs_sort_for_rename(
2535 struct xfs_inode *dp1,
2536 struct xfs_inode *dp2,
2537 struct xfs_inode *ip1,
2538 struct xfs_inode *ip2,
2539 struct xfs_inode *wip,
2540 struct xfs_inode **i_tab,
2541 int *num_inodes)
2542 {
2543 int i, j;
2544
2545 ASSERT(*num_inodes == __XFS_SORT_INODES);
2546 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2547
2548
2549
2550
2551
2552
2553
2554
2555 i = 0;
2556 i_tab[i++] = dp1;
2557 i_tab[i++] = dp2;
2558 i_tab[i++] = ip1;
2559 if (ip2)
2560 i_tab[i++] = ip2;
2561 if (wip)
2562 i_tab[i++] = wip;
2563 *num_inodes = i;
2564
2565
2566
2567
2568
2569 for (i = 0; i < *num_inodes; i++) {
2570 for (j = 1; j < *num_inodes; j++) {
2571 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2572 struct xfs_inode *temp = i_tab[j];
2573 i_tab[j] = i_tab[j-1];
2574 i_tab[j-1] = temp;
2575 }
2576 }
2577 }
2578 }
2579
2580 static int
2581 xfs_finish_rename(
2582 struct xfs_trans *tp)
2583 {
2584
2585
2586
2587
2588 if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2589 xfs_trans_set_sync(tp);
2590
2591 return xfs_trans_commit(tp);
2592 }
2593
2594
2595
2596
2597
2598
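/*
 * RENAME_EXCHANGE: swap the two directory entries, fixing up any ".."
 * entries and parent link counts when the two parent directories differ.
 */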
2599 STATIC int
2600 xfs_cross_rename(
2601 struct xfs_trans *tp,
2602 struct xfs_inode *dp1,
2603 struct xfs_name *name1,
2604 struct xfs_inode *ip1,
2605 struct xfs_inode *dp2,
2606 struct xfs_name *name2,
2607 struct xfs_inode *ip2,
2608 int spaceres)
2609 {
2610 int error = 0;
2611 int ip1_flags = 0;
2612 int ip2_flags = 0;
2613 int dp2_flags = 0;
2614
2615
2616 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2617 if (error)
2618 goto out_trans_abort;
2619
2620
2621 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2622 if (error)
2623 goto out_trans_abort;
2624
2625
2626
2627
2628
2629
2630 if (dp1 != dp2) {
2631 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2632
2633 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2634 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2635 dp1->i_ino, spaceres);
2636 if (error)
2637 goto out_trans_abort;
2638
2639
2640 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2641 error = xfs_droplink(tp, dp2);
2642 if (error)
2643 goto out_trans_abort;
2644 xfs_bumplink(tp, dp1);
2645 }
2646
2647
2648
2649
2650
2651
2652
2653 ip1_flags |= XFS_ICHGTIME_CHG;
2654 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2655 }
2656
2657 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2658 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2659 dp2->i_ino, spaceres);
2660 if (error)
2661 goto out_trans_abort;
2662
2663
2664 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2665 error = xfs_droplink(tp, dp1);
2666 if (error)
2667 goto out_trans_abort;
2668 xfs_bumplink(tp, dp2);
2669 }
2670
2671
2672
2673
2674
2675
2676
2677 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2678 ip2_flags |= XFS_ICHGTIME_CHG;
2679 }
2680 }
2681
2682 if (ip1_flags) {
2683 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2684 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2685 }
2686 if (ip2_flags) {
2687 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2688 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2689 }
2690 if (dp2_flags) {
2691 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2692 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2693 }
2694 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2695 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2696 return xfs_finish_rename(tp);
2697
2698 out_trans_abort:
2699 xfs_trans_cancel(tp);
2700 return error;
2701 }
2702
2703
2704
2705
2706
2707
2708
2709
2710
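/*
 * For RENAME_WHITEOUT, allocate a temporary character-device inode that
 * stays on the unlinked list (nlink 0) until it replaces the source entry
 * at the end of the rename.
 */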
2711 static int
2712 xfs_rename_alloc_whiteout(
2713 struct user_namespace *mnt_userns,
2714 struct xfs_name *src_name,
2715 struct xfs_inode *dp,
2716 struct xfs_inode **wip)
2717 {
2718 struct xfs_inode *tmpfile;
2719 struct qstr name;
2720 int error;
2721
2722 error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
2723 &tmpfile);
2724 if (error)
2725 return error;
2726
2727 name.name = src_name->name;
2728 name.len = src_name->len;
2729 error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2730 if (error) {
2731 xfs_finish_inode_setup(tmpfile);
2732 xfs_irele(tmpfile);
2733 return error;
2734 }
2735
2736
2737
2738
2739
2740
2741 xfs_setup_iops(tmpfile);
2742 xfs_finish_inode_setup(tmpfile);
2743 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2744
2745 *wip = tmpfile;
2746 return 0;
2747 }
2748
2749
2750
2751
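/*
 * xfs_rename() handles plain renames as well as RENAME_EXCHANGE and
 * RENAME_WHITEOUT.  All involved inodes are locked in ascending inode
 * number order and joined to a single transaction so the operation is
 * atomic with respect to the log.
 */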
2752 int
2753 xfs_rename(
2754 struct user_namespace *mnt_userns,
2755 struct xfs_inode *src_dp,
2756 struct xfs_name *src_name,
2757 struct xfs_inode *src_ip,
2758 struct xfs_inode *target_dp,
2759 struct xfs_name *target_name,
2760 struct xfs_inode *target_ip,
2761 unsigned int flags)
2762 {
2763 struct xfs_mount *mp = src_dp->i_mount;
2764 struct xfs_trans *tp;
2765 struct xfs_inode *wip = NULL;
2766 struct xfs_inode *inodes[__XFS_SORT_INODES];
2767 int i;
2768 int num_inodes = __XFS_SORT_INODES;
2769 bool new_parent = (src_dp != target_dp);
2770 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2771 int spaceres;
2772 bool retried = false;
2773 int error, nospace_error = 0;
2774
2775 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2776
2777 if ((flags & RENAME_EXCHANGE) && !target_ip)
2778 return -EINVAL;
2779
2780
2781
2782
2783
2784
2785 if (flags & RENAME_WHITEOUT) {
2786 error = xfs_rename_alloc_whiteout(mnt_userns, src_name,
2787 target_dp, &wip);
2788 if (error)
2789 return error;
2790
2791
2792 src_name->type = XFS_DIR3_FT_CHRDEV;
2793 }
2794
2795 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2796 inodes, &num_inodes);
2797
2798 retry:
2799 nospace_error = 0;
2800 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2801 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2802 if (error == -ENOSPC) {
2803 nospace_error = error;
2804 spaceres = 0;
2805 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2806 &tp);
2807 }
2808 if (error)
2809 goto out_release_wip;
2810
2811
2812
2813
2814 error = xfs_qm_vop_rename_dqattach(inodes);
2815 if (error)
2816 goto out_trans_cancel;
2817
2818
2819
2820
2821
2822
2823
2824 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2825
2826
2827
2828
2829
2830
2831 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2832 if (new_parent)
2833 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2834 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2835 if (target_ip)
2836 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2837 if (wip)
2838 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2839
2840
2841
2842
2843
2844
2845 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2846 target_dp->i_projid != src_ip->i_projid)) {
2847 error = -EXDEV;
2848 goto out_trans_cancel;
2849 }
2850
2851
2852 if (flags & RENAME_EXCHANGE)
2853 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2854 target_dp, target_name, target_ip,
2855 spaceres);
2856
2857 	/*
2858 	 * Try to reserve quota to handle an expansion of the target directory.
2859 	 * We'll allow the rename to continue in reservationless mode if we hit
2860 	 * a space usage constraint.  If we trigger reservationless mode, save
2861 	 * the errno if there isn't any free space in the target directory.
2862 	 */
2863 if (spaceres != 0) {
2864 error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2865 0, false);
2866 if (error == -EDQUOT || error == -ENOSPC) {
2867 if (!retried) {
2868 xfs_trans_cancel(tp);
2869 xfs_blockgc_free_quota(target_dp, 0);
2870 retried = true;
2871 goto retry;
2872 }
2873
2874 nospace_error = error;
2875 spaceres = 0;
2876 error = 0;
2877 }
2878 if (error)
2879 goto out_trans_cancel;
2880 }
2881
2882 	/*
2883 	 * Check for expected errors before we dirty the transaction
2884 	 * so we can return an error without a transaction abort.
2885 	 */
2886 if (target_ip == NULL) {
2887 		/*
2888 		 * If there's no space reservation, check the entry will
2889 		 * fit before actually inserting it.
2890 		 */
2891 if (!spaceres) {
2892 error = xfs_dir_canenter(tp, target_dp, target_name);
2893 if (error)
2894 goto out_trans_cancel;
2895 }
2896 } else {
2897 		/*
2898 		 * If target exists and it's a directory, check that it can
2899 		 * be destroyed first.
2900 		 */
2901 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2902 (!xfs_dir_isempty(target_ip) ||
2903 (VFS_I(target_ip)->i_nlink > 2))) {
2904 error = -EEXIST;
2905 goto out_trans_cancel;
2906 }
2907 }
2908
2909 	/*
2910 	 * Lock the AGI buffers we need to handle bumping the nlink of the
2911 	 * whiteout inode off the unlinked list and to handle dropping the
2912 	 * nlink of the target inode.  Per locking order rules, do this in
2913 	 * increasing AG order and before directory block allocation tries to
2914 	 * grab AGFs because we grab AGIs before AGFs.
2915 	 *
2916 	 * The (vfs) caller must ensure that if src is a directory then
2917 	 * target_ip is either null or an empty directory.
2918 	 */
2919 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
2920 if (inodes[i] == wip ||
2921 (inodes[i] == target_ip &&
2922 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
2923 struct xfs_perag *pag;
2924 struct xfs_buf *bp;
2925
2926 pag = xfs_perag_get(mp,
2927 XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
2928 error = xfs_read_agi(pag, tp, &bp);
2929 xfs_perag_put(pag);
2930 if (error)
2931 goto out_trans_cancel;
2932 }
2933 }
2934
2935 	/*
2936 	 * Directory entry creation below may acquire the AGF. Remove the
2937 	 * whiteout from the unlinked list first to preserve correct AGI/AGF
2938 	 * locking order. This also dirties the transaction, so failures
2939 	 * after this point will abort and log recovery will clean up the
2940 	 * mess.
2941 	 *
2942 	 * For whiteouts, we need to bump the link count on the whiteout
2943 	 * inode. After this point we have a real link, so clear the tmpfile
2944 	 * state flag from the inode so that it doesn't accidentally get
2945 	 * misused in future.
2946 	 */
2947 if (wip) {
2948 struct xfs_perag *pag;
2949
2950 ASSERT(VFS_I(wip)->i_nlink == 0);
2951
2952 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
2953 error = xfs_iunlink_remove(tp, pag, wip);
2954 xfs_perag_put(pag);
2955 if (error)
2956 goto out_trans_cancel;
2957
2958 xfs_bumplink(tp, wip);
2959 VFS_I(wip)->i_state &= ~I_LINKABLE;
2960 }
2961
2962 	/*
2963 	 * Set up the target.
2964 	 */
2965 if (target_ip == NULL) {
2966 		/*
2967 		 * If target does not exist and the rename crosses
2968 		 * directories, adjust the target directory link count
2969 		 * to account for the ".." reference from the new entry.
2970 		 */
2971 error = xfs_dir_createname(tp, target_dp, target_name,
2972 src_ip->i_ino, spaceres);
2973 if (error)
2974 goto out_trans_cancel;
2975
2976 xfs_trans_ichgtime(tp, target_dp,
2977 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2978
2979 if (new_parent && src_is_directory) {
2980 xfs_bumplink(tp, target_dp);
2981 }
2982 } else {
2983 		/*
2984 		 * Link the source inode under the target name.
2985 		 * If the source inode is a directory and we are moving
2986 		 * it across directories, its ".." entry will be
2987 		 * inconsistent until we replace that down below.
2988 		 *
2989 		 * In case there is already an entry with the same
2990 		 * name at the destination directory, remove it first.
2991 		 */
2992 error = xfs_dir_replace(tp, target_dp, target_name,
2993 src_ip->i_ino, spaceres);
2994 if (error)
2995 goto out_trans_cancel;
2996
2997 xfs_trans_ichgtime(tp, target_dp,
2998 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2999
3000 		/*
3001 		 * Decrement the link count on the target since the target
3002 		 * dir no longer points to it.
3003 		 */
3004 error = xfs_droplink(tp, target_ip);
3005 if (error)
3006 goto out_trans_cancel;
3007
3008 if (src_is_directory) {
3009 			/*
3010 			 * Drop the link from the old "." entry.
3011 			 */
3012 error = xfs_droplink(tp, target_ip);
3013 if (error)
3014 goto out_trans_cancel;
3015 }
3016 }
3017
3018 	/*
3019 	 * Remove the source.
3020 	 */
3021 if (new_parent && src_is_directory) {
3022 		/*
3023 		 * Rewrite the ".." entry to point to the new
3024 		 * directory.
3025 		 */
3026 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3027 target_dp->i_ino, spaceres);
3028 ASSERT(error != -EEXIST);
3029 if (error)
3030 goto out_trans_cancel;
3031 }
3032
3033 	/*
3034 	 * We always want to hit the ctime on the source inode.
3035 	 *
3036 	 * This isn't strictly required by the standards since the source
3037 	 * inode isn't really being changed, but old unix file systems did
3038 	 * it and some incremental backup programs won't work without it.
3039 	 */
3040 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3041 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3042
3043 	/*
3044 	 * Adjust the link count on src_dp.  This is necessary when
3045 	 * renaming a directory, either within one parent when
3046 	 * the target existed, or across two parent directories.
3047 	 */
3048 if (src_is_directory && (new_parent || target_ip != NULL)) {
3049 		/*
3050 		 * Decrement the link count on src_dp: the ".." entry that
3051 		 * pointed back at it either moves to target_dp or goes
3052 		 * away with target_ip.
3053 		 */
3054 error = xfs_droplink(tp, src_dp);
3055 if (error)
3056 goto out_trans_cancel;
3057 }
3058
3059 	/*
3060 	 * For whiteouts, we only need to update the source dirent with the
3061 	 * inode number of the whiteout inode rather than removing it
3062 	 * altogether.
3063 	 */
3064 if (wip)
3065 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3066 spaceres);
3067 else
3068 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3069 spaceres);
3070
3071 if (error)
3072 goto out_trans_cancel;
3073
3074 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3075 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3076 if (new_parent)
3077 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3078
3079 error = xfs_finish_rename(tp);
3080 if (wip)
3081 xfs_irele(wip);
3082 return error;
3083
3084 out_trans_cancel:
3085 xfs_trans_cancel(tp);
3086 out_release_wip:
3087 if (wip)
3088 xfs_irele(wip);
3089 if (error == -ENOSPC && nospace_error)
3090 error = nospace_error;
3091 return error;
3092 }
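/*
 * Illustration, not part of the driver: the ENOSPC/EDQUOT handling in
 * xfs_rename() above follows a small pattern - reserve space, retry once
 * after reclaiming speculative preallocations, then run without a
 * reservation but remember the first -ENOSPC so that a later failure can be
 * reported as the space problem it really was.  A standalone, simplified
 * sketch of that control flow; reserve(), cancel(), reclaim() and do_op()
 * are hypothetical stand-ins for xfs_trans_alloc(), xfs_trans_cancel(),
 * xfs_blockgc_free_quota() and the rename body:
 *
 *	#include <errno.h>
 *	#include <stdbool.h>
 *
 *	#define FULL_RESERVATION	16
 *
 *	static int reserve(int blocks) { (void)blocks; return 0; }
 *	static void cancel(void) { }
 *	static void reclaim(void) { }
 *	static int do_op(int blocks) { (void)blocks; return 0; }
 *
 *	static int rename_like_op(void)
 *	{
 *		bool retried = false;
 *		int nospace_error = 0;
 *		int spaceres, error;
 *
 *	retry:
 *		spaceres = FULL_RESERVATION;
 *		error = reserve(spaceres);
 *		if (error == -ENOSPC) {
 *			// Remember the space failure and fall back to an
 *			// unreserved attempt.
 *			nospace_error = error;
 *			spaceres = 0;
 *			error = reserve(0);
 *		}
 *		if (error)
 *			return error;
 *
 *		error = do_op(spaceres);
 *		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *			// One retry after freeing speculative space.
 *			cancel();
 *			reclaim();
 *			retried = true;
 *			goto retry;
 *		}
 *		if (error == -ENOSPC && nospace_error)
 *			error = nospace_error;
 *		return error;
 *	}
 */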
3093
3094 static int
3095 xfs_iflush(
3096 struct xfs_inode *ip,
3097 struct xfs_buf *bp)
3098 {
3099 struct xfs_inode_log_item *iip = ip->i_itemp;
3100 struct xfs_dinode *dip;
3101 struct xfs_mount *mp = ip->i_mount;
3102 int error;
3103
3104 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3105 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3106 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3107 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3108 ASSERT(iip->ili_item.li_buf == bp);
3109
3110 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3111
3112 	/*
3113 	 * We don't flush the inode if any of the following checks fail, but we
3114 	 * do still update the log item and attach to the backing buffer as if
3115 	 * the flush happened. This is a formality to facilitate predictable
3116 	 * error handling as the caller will shutdown and fail the buffer.
3117 	 */
3118 error = -EFSCORRUPTED;
3119 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3120 mp, XFS_ERRTAG_IFLUSH_1)) {
3121 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3122 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3123 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3124 goto flush_out;
3125 }
3126 if (S_ISREG(VFS_I(ip)->i_mode)) {
3127 if (XFS_TEST_ERROR(
3128 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3129 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3130 mp, XFS_ERRTAG_IFLUSH_3)) {
3131 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3132 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3133 __func__, ip->i_ino, ip);
3134 goto flush_out;
3135 }
3136 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3137 if (XFS_TEST_ERROR(
3138 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3139 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3140 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3141 mp, XFS_ERRTAG_IFLUSH_4)) {
3142 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3143 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3144 __func__, ip->i_ino, ip);
3145 goto flush_out;
3146 }
3147 }
3148 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3149 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3150 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3151 "%s: detected corrupt incore inode %llu, "
3152 "total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3153 __func__, ip->i_ino,
3154 ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3155 ip->i_nblocks, ip);
3156 goto flush_out;
3157 }
3158 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3159 mp, XFS_ERRTAG_IFLUSH_6)) {
3160 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3161 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3162 __func__, ip->i_ino, ip->i_forkoff, ip);
3163 goto flush_out;
3164 }
3165
3166 	/*
3167 	 * Inode item log recovery for v1/v2 inodes is dependent on the
3168 	 * flushiter count for correct sequencing.  Bump the flush iteration
3169 	 * count so we can detect flushes which postdate a log record during
3170 	 * recovery.  This is redundant as we now log every change, but we
3171 	 * still do it to stay backwards compatible with old kernels that
3172 	 * predate logging all inode changes.
3173 	 */
3174 if (!xfs_has_v3inodes(mp))
3175 ip->i_flushiter++;
3176
3177 	/*
3178 	 * If there are inline format data / attr forks attached to this inode,
3179 	 * make sure they are not corrupt.
3180 	 */
3181 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3182 xfs_ifork_verify_local_data(ip))
3183 goto flush_out;
3184 if (xfs_inode_has_attr_fork(ip) &&
3185 ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3186 xfs_ifork_verify_local_attr(ip))
3187 goto flush_out;
3188
3189 	/*
3190 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3191 	 * copy out the core of the inode, because if the inode is dirty at all
3192 	 * the core must be.
3193 	 */
3194 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3195
3196 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3197 if (!xfs_has_v3inodes(mp)) {
3198 if (ip->i_flushiter == DI_MAX_FLUSH)
3199 ip->i_flushiter = 0;
3200 }
3201
3202 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3203 if (xfs_inode_has_attr_fork(ip))
3204 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3205
3206 	/*
3207 	 * We've recorded everything logged in the inode, so we'd like to clear
3208 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3209 	 * However, we can't stop logging all this information until the data
3210 	 * we've copied into the disk buffer is written to disk.  If we did we
3211 	 * might overwrite the copy of the inode in the log with all the data
3212 	 * after re-logging only part of it, and in the face of a crash we
3213 	 * wouldn't have all the data we need to recover.
3214 	 *
3215 	 * What we do is move the bits to the ili_last_fields field.  When
3216 	 * logging the inode, these bits are moved back to the ili_fields field.
3217 	 * In xfs_buf_inode_iodone() we clear ili_last_fields, since we know
3218 	 * that the information those bits represent is permanently on disk.
3219 	 * As long as the flush completes before the inode is logged again,
3220 	 * then both ili_fields and ili_last_fields will be cleared.
3221 	 */
3222 error = 0;
3223 flush_out:
3224 spin_lock(&iip->ili_lock);
3225 iip->ili_last_fields = iip->ili_fields;
3226 iip->ili_fields = 0;
3227 iip->ili_fsync_fields = 0;
3228 spin_unlock(&iip->ili_lock);
3229
3230 	/*
3231 	 * Store the current LSN of the inode so that we can tell whether the
3232 	 * item has moved in the AIL from xfs_buf_inode_iodone().
3233 	 */
3234 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3235 &iip->ili_item.li_lsn);
3236
3237 	/* generate the checksum. */
3238 xfs_dinode_calc_crc(mp, dip);
3239 return error;
3240 }
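/*
 * Illustration, not part of the driver: the flush_out tail above (move
 * ili_fields to ili_last_fields under ili_lock, then clear the latter at I/O
 * completion) is a generic "dirty bits vs. bits in flight" pattern.  A small
 * self-contained sketch of the same idea using pthreads and hypothetical
 * names:
 *
 *	#include <pthread.h>
 *
 *	struct item {
 *		pthread_mutex_t	lock;
 *		unsigned int	dirty;		// like ili_fields
 *		unsigned int	in_flight;	// like ili_last_fields
 *	};
 *
 *	// Caller has just copied the dirty state into the I/O buffer.
 *	static void flush_start(struct item *it)
 *	{
 *		pthread_mutex_lock(&it->lock);
 *		it->in_flight = it->dirty;	// remember what is being written
 *		it->dirty = 0;			// new changes start a new set
 *		pthread_mutex_unlock(&it->lock);
 *	}
 *
 *	// I/O completion: what was in flight is now stable on disk.
 *	static void flush_done(struct item *it)
 *	{
 *		pthread_mutex_lock(&it->lock);
 *		it->in_flight = 0;
 *		pthread_mutex_unlock(&it->lock);
 *	}
 *
 *	// A modification while a flush is pending only touches "dirty", so
 *	// the item stays dirty (dirty | in_flight != 0) until both sets clear.
 *	static void mark_dirty(struct item *it, unsigned int bits)
 *	{
 *		pthread_mutex_lock(&it->lock);
 *		it->dirty |= bits;
 *		pthread_mutex_unlock(&it->lock);
 *	}
 */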
3241
3242 /*
3243  * Non-blocking flush of dirty inode metadata into the backing buffer.
3244  *
3245  * The caller must have a reference to the inode and hold the cluster buffer
3246  * locked. The function will walk across all the inodes on the cluster buffer
3247  * it can find and lock without blocking, and flush them to the cluster buffer.
3248  *
3249  * On successful flushing of at least one inode, the caller must write out the
3250  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned
3251  * and the caller needs to release the buffer. On failure, the filesystem will
3252  * be shut down, the buffer will have been unlocked and released, and
3253  * EFSCORRUPTED will be returned.
3254  */
3255 int
3256 xfs_iflush_cluster(
3257 struct xfs_buf *bp)
3258 {
3259 struct xfs_mount *mp = bp->b_mount;
3260 struct xfs_log_item *lip, *n;
3261 struct xfs_inode *ip;
3262 struct xfs_inode_log_item *iip;
3263 int clcount = 0;
3264 int error = 0;
3265
3266 	/*
3267 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3268 	 * may remove inodes from this list while we are walking it.
3269 	 */
3270 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3271 iip = (struct xfs_inode_log_item *)lip;
3272 ip = iip->ili_inode;
3273
3274 		/*
3275 		 * Quick and dirty check to avoid locks if possible.
3276 		 */
3277 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3278 continue;
3279 if (xfs_ipincount(ip))
3280 continue;
3281
3282 		/*
3283 		 * The inode is still attached to the buffer, which means it is
3284 		 * dirty but reclaim might try to grab it. Check carefully for
3285 		 * that, and grab the ilock while still holding the i_flags_lock
3286 		 * to guarantee reclaim will not be able to reclaim this inode
3287 		 * once we drop the i_flags_lock.
3288 		 */
3289 spin_lock(&ip->i_flags_lock);
3290 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3291 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3292 spin_unlock(&ip->i_flags_lock);
3293 continue;
3294 }
3295
3296 		/*
3297 		 * ILOCK will pin the inode against reclaim and prevent
3298 		 * concurrent transactions modifying the inode while we are
3299 		 * flushing the inode. If we get the lock, set the flushing
3300 		 * state before we drop the i_flags_lock.
3301 		 */
3302 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3303 spin_unlock(&ip->i_flags_lock);
3304 continue;
3305 }
3306 __xfs_iflags_set(ip, XFS_IFLUSHING);
3307 spin_unlock(&ip->i_flags_lock);
3308
3309 		/*
3310 		 * Abort flushing this inode if we are shut down because the
3311 		 * inode may not currently be in the AIL. This can occur when
3312 		 * log I/O failure unpins the inode without inserting into the
3313 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3314 		 * that otherwise looks like it should be flushed.
3315 		 */
3316 if (xlog_is_shutdown(mp->m_log)) {
3317 xfs_iunpin_wait(ip);
3318 xfs_iflush_abort(ip);
3319 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3320 error = -EIO;
3321 continue;
3322 }
3323
3324 		/* don't block waiting on a log force to unpin dirty inodes */
3325 if (xfs_ipincount(ip)) {
3326 xfs_iflags_clear(ip, XFS_IFLUSHING);
3327 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3328 continue;
3329 }
3330
3331 if (!xfs_inode_clean(ip))
3332 error = xfs_iflush(ip, bp);
3333 else
3334 xfs_iflags_clear(ip, XFS_IFLUSHING);
3335 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3336 if (error)
3337 break;
3338 clcount++;
3339 }
3340
3341 if (error) {
3342 		/*
3343 		 * Shutdown first so we are guaranteed to abort the inode
3344 		 * flush when we drop the buffer pin below, then simulate an
3345 		 * async write failure on the buffer. Failing the buffer I/O
3346 		 * runs the flush abort processing for every inode log item
3347 		 * still attached, which removes them from the AIL, clears
3348 		 * their flushing state and unlocks and releases the buffer,
3349 		 * so nothing is left half flushed on the shut down
3350 		 * filesystem.
3351 		 */
3352 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3353 bp->b_flags |= XBF_ASYNC;
3354 xfs_buf_ioend_fail(bp);
3355 return error;
3356 }
3357
3358 if (!clcount)
3359 return -EAGAIN;
3360
3361 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3362 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3363 return 0;
3365 }
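/*
 * Illustration, not part of the driver: xfs_iflush_cluster() is an
 * opportunistic, non-blocking batch - every object that cannot be locked or
 * is busy is simply skipped, and -EAGAIN tells the caller to retry later if
 * nothing at all was flushed.  A standalone sketch of that shape with
 * hypothetical types:
 *
 *	#include <errno.h>
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	struct obj {
 *		pthread_mutex_t	lock;
 *		bool		busy;	// like a pinned inode
 *		bool		dirty;
 *	};
 *
 *	static int flush_one(struct obj *o) { o->dirty = false; return 0; }
 *
 *	static int flush_batch(struct obj **objs, int nr)
 *	{
 *		int i, flushed = 0, error = 0;
 *
 *		for (i = 0; i < nr; i++) {
 *			struct obj *o = objs[i];
 *
 *			if (pthread_mutex_trylock(&o->lock))
 *				continue;	// somebody else owns it; skip
 *			if (o->busy || !o->dirty) {
 *				pthread_mutex_unlock(&o->lock);
 *				continue;	// nothing to do right now
 *			}
 *			error = flush_one(o);
 *			pthread_mutex_unlock(&o->lock);
 *			if (error)
 *				break;		// caller fails the whole batch
 *			flushed++;
 *		}
 *		if (error)
 *			return error;
 *		return flushed ? 0 : -EAGAIN;	// nothing done: try again later
 *	}
 */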
3366
3367 /* Release a reference to an inode. */
3368 void
3369 xfs_irele(
3370 struct xfs_inode *ip)
3371 {
3372 trace_xfs_irele(ip, _RET_IP_);
3373 iput(VFS_I(ip));
3374 }
3375
3376 /*
3377  * Ensure all committed transactions touching the inode are written to the log.
3378  */
3379 int
3380 xfs_log_force_inode(
3381 struct xfs_inode *ip)
3382 {
3383 xfs_csn_t seq = 0;
3384
3385 xfs_ilock(ip, XFS_ILOCK_SHARED);
3386 if (xfs_ipincount(ip))
3387 seq = ip->i_itemp->ili_commit_seq;
3388 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3389
3390 if (!seq)
3391 return 0;
3392 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3393 }
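/*
 * Illustration, not part of the driver: xfs_log_force_inode() samples the
 * inode's commit sequence while holding the shared ILOCK, then drops the
 * lock before issuing the potentially blocking log force.  A standalone
 * sketch of the same "capture the target under the lock, wait for it outside
 * the lock" shape, with hypothetical journal/object types:
 *
 *	#include <pthread.h>
 *	#include <stdint.h>
 *
 *	struct journal {
 *		pthread_mutex_t	lock;
 *		pthread_cond_t	flushed;
 *		uint64_t	persisted_seq;
 *	};
 *
 *	struct object {
 *		pthread_mutex_t	lock;
 *		int		pinned;		// has an uncommitted change
 *		uint64_t	commit_seq;
 *	};
 *
 *	static void force_object(struct journal *j, struct object *o)
 *	{
 *		uint64_t seq = 0;
 *
 *		// Sample the sequence under the object lock only.
 *		pthread_mutex_lock(&o->lock);
 *		if (o->pinned)
 *			seq = o->commit_seq;
 *		pthread_mutex_unlock(&o->lock);
 *		if (!seq)
 *			return;			// nothing in flight to wait for
 *
 *		// Wait for the journal without holding the object lock.
 *		pthread_mutex_lock(&j->lock);
 *		while (j->persisted_seq < seq)
 *			pthread_cond_wait(&j->flushed, &j->lock);
 *		pthread_mutex_unlock(&j->lock);
 *	}
 */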
3394
3395 /*
3396  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3397  * abide vfs locking order (lowest pointer value goes first) and breaking the
3398  * layout leases before proceeding.  The loop is needed because we cannot call
3399  * the blocking break_layout() with the iolocks held, and therefore have to
3400  * back out both locks.
3401  */
3402 static int
3403 xfs_iolock_two_inodes_and_break_layout(
3404 struct inode *src,
3405 struct inode *dest)
3406 {
3407 int error;
3408
3409 if (src > dest)
3410 swap(src, dest);
3411
3412 retry:
3413 	/* Wait to break both inodes' layouts before we start locking. */
3414 error = break_layout(src, true);
3415 if (error)
3416 return error;
3417 if (src != dest) {
3418 error = break_layout(dest, true);
3419 if (error)
3420 return error;
3421 }
3422
3423 	/* Lock one inode and make sure nobody got in and leased it. */
3424 inode_lock(src);
3425 error = break_layout(src, false);
3426 if (error) {
3427 inode_unlock(src);
3428 if (error == -EWOULDBLOCK)
3429 goto retry;
3430 return error;
3431 }
3432
3433 if (src == dest)
3434 return 0;
3435
3436 	/* Lock the other inode and make sure nobody got in and leased it. */
3437 inode_lock_nested(dest, I_MUTEX_NONDIR2);
3438 error = break_layout(dest, false);
3439 if (error) {
3440 inode_unlock(src);
3441 inode_unlock(dest);
3442 if (error == -EWOULDBLOCK)
3443 goto retry;
3444 return error;
3445 }
3446
3447 return 0;
3448 }
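/*
 * Illustration, not part of the driver: the helper above combines two
 * classic deadlock-avoidance rules - take the two locks in a stable order
 * (lowest pointer first) and never sleep for a third party (break_layout())
 * while holding them; instead back out and retry.  A standalone sketch of
 * that retry loop, with a hypothetical try_clear_lease() standing in for the
 * non-blocking break_layout() call (the real code waits for the lease to
 * clear with no locks held before retrying):
 *
 *	#include <errno.h>
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	struct node {
 *		pthread_mutex_t	lock;
 *		bool		leased;
 *	};
 *
 *	static int try_clear_lease(struct node *n)
 *	{
 *		return n->leased ? -EWOULDBLOCK : 0;
 *	}
 *
 *	static int lock_two(struct node *a, struct node *b)
 *	{
 *		int error;
 *
 *		if (a > b) {			// stable ordering by address
 *			struct node *tmp = a;
 *			a = b;
 *			b = tmp;
 *		}
 *	retry:
 *		pthread_mutex_lock(&a->lock);
 *		error = try_clear_lease(a);
 *		if (error) {
 *			pthread_mutex_unlock(&a->lock);
 *			if (error == -EWOULDBLOCK)
 *				goto retry;	// wait/backoff goes here
 *			return error;
 *		}
 *		if (a == b)
 *			return 0;
 *		pthread_mutex_lock(&b->lock);
 *		error = try_clear_lease(b);
 *		if (error) {
 *			pthread_mutex_unlock(&b->lock);
 *			pthread_mutex_unlock(&a->lock);
 *			if (error == -EWOULDBLOCK)
 *				goto retry;
 *			return error;
 *		}
 *		return 0;
 *	}
 */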
3449
3450 static int
3451 xfs_mmaplock_two_inodes_and_break_dax_layout(
3452 struct xfs_inode *ip1,
3453 struct xfs_inode *ip2)
3454 {
3455 int error;
3456 bool retry;
3457 struct page *page;
3458
3459 if (ip1->i_ino > ip2->i_ino)
3460 swap(ip1, ip2);
3461
3462 again:
3463 retry = false;
3464
3465 xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3466 error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
3467 if (error || retry) {
3468 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3469 if (error == 0 && retry)
3470 goto again;
3471 return error;
3472 }
3473
3474 if (ip1 == ip2)
3475 return 0;
3476
3477 	/* Lock the second inode's MMAPLOCK with a nested subclass. */
3478 xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
3479
3480 	/*
3481 	 * We cannot use xfs_break_dax_layouts() directly here because it may
3482 	 * need to cycle XFS_MMAPLOCK_EXCL, which does not suit this nested case.
3483 	 */
3484 page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
3485 if (page && page_ref_count(page) != 1) {
3486 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3487 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3488 goto again;
3489 }
3490
3491 return 0;
3492 }
3493
3494 /*
3495  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3496  * mmap activity.
3497  */
3498 int
3499 xfs_ilock2_io_mmap(
3500 struct xfs_inode *ip1,
3501 struct xfs_inode *ip2)
3502 {
3503 int ret;
3504
3505 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3506 if (ret)
3507 return ret;
3508
3509 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3510 ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
3511 if (ret) {
3512 inode_unlock(VFS_I(ip2));
3513 if (ip1 != ip2)
3514 inode_unlock(VFS_I(ip1));
3515 return ret;
3516 }
3517 } else
3518 filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3519 VFS_I(ip2)->i_mapping);
3520
3521 return 0;
3522 }
3523
3524 /* Unlock both inodes to allow IO and mmap activity. */
3525 void
3526 xfs_iunlock2_io_mmap(
3527 struct xfs_inode *ip1,
3528 struct xfs_inode *ip2)
3529 {
3530 if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
3531 xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3532 if (ip1 != ip2)
3533 xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3534 } else
3535 filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3536 VFS_I(ip2)->i_mapping);
3537
3538 inode_unlock(VFS_I(ip2));
3539 if (ip1 != ip2)
3540 inode_unlock(VFS_I(ip1));
3541 }
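/*
 * Illustration, not part of the driver: a hedged sketch of how a two-file
 * operation is expected to bracket its work with the two helpers above,
 * which is how the reflink remap path uses them.  The do_remap_work()
 * helper is hypothetical and stands in for the actual remap or exchange
 * logic of a real caller:
 *
 *	static int two_file_op(struct xfs_inode *ip1, struct xfs_inode *ip2)
 *	{
 *		int error;
 *
 *		// Blocks new file I/O, mmap faults and DAX/pNFS layouts on
 *		// both inodes before the operation starts.
 *		error = xfs_ilock2_io_mmap(ip1, ip2);
 *		if (error)
 *			return error;
 *
 *		error = do_remap_work(ip1, ip2);
 *
 *		xfs_iunlock2_io_mmap(ip1, ip2);
 *		return error;
 *	}
 */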