// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

0007 #include <linux/slab.h>
0008 #include <linux/spinlock.h>
0009 #include <linux/completion.h>
0010 #include <linux/buffer_head.h>
0011 #include <linux/namei.h>
0012 #include <linux/mm.h>
0013 #include <linux/cred.h>
0014 #include <linux/xattr.h>
0015 #include <linux/posix_acl.h>
0016 #include <linux/gfs2_ondisk.h>
0017 #include <linux/crc32.h>
0018 #include <linux/iomap.h>
0019 #include <linux/security.h>
0020 #include <linux/fiemap.h>
0021 #include <linux/uaccess.h>
0022
0023 #include "gfs2.h"
0024 #include "incore.h"
0025 #include "acl.h"
0026 #include "bmap.h"
0027 #include "dir.h"
0028 #include "xattr.h"
0029 #include "glock.h"
0030 #include "inode.h"
0031 #include "meta_io.h"
0032 #include "quota.h"
0033 #include "rgrp.h"
0034 #include "trans.h"
0035 #include "util.h"
0036 #include "super.h"
0037 #include "glops.h"
0038
0039 static const struct inode_operations gfs2_file_iops;
0040 static const struct inode_operations gfs2_dir_iops;
0041 static const struct inode_operations gfs2_symlink_iops;

/**
 * gfs2_set_iop - Set the inode and file operations for an inode
 * @inode: The inode, with i_mode already filled in
 *
 * Picks the inode_operations and file_operations that match the file
 * type; special files are handed to init_special_inode().  When the
 * filesystem is mounted with local file locking, the "nolock" file
 * operations are used instead.
 */
0051 static void gfs2_set_iop(struct inode *inode)
0052 {
0053 struct gfs2_sbd *sdp = GFS2_SB(inode);
0054 umode_t mode = inode->i_mode;
0055
0056 if (S_ISREG(mode)) {
0057 inode->i_op = &gfs2_file_iops;
0058 if (gfs2_localflocks(sdp))
0059 inode->i_fop = &gfs2_file_fops_nolock;
0060 else
0061 inode->i_fop = &gfs2_file_fops;
0062 } else if (S_ISDIR(mode)) {
0063 inode->i_op = &gfs2_dir_iops;
0064 if (gfs2_localflocks(sdp))
0065 inode->i_fop = &gfs2_dir_fops_nolock;
0066 else
0067 inode->i_fop = &gfs2_dir_fops;
0068 } else if (S_ISLNK(mode)) {
0069 inode->i_op = &gfs2_symlink_iops;
0070 } else {
0071 inode->i_op = &gfs2_file_iops;
0072 init_special_inode(inode, inode->i_mode, inode->i_rdev);
0073 }
0074 }
0075
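/*
 * iget5_locked() helpers: iget_test matches an in-core inode by its disk
 * address, and iget_set initialises a freshly allocated inode with it.
 */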
0076 static int iget_test(struct inode *inode, void *opaque)
0077 {
0078 u64 no_addr = *(u64 *)opaque;
0079
0080 return GFS2_I(inode)->i_no_addr == no_addr;
0081 }
0082
0083 static int iget_set(struct inode *inode, void *opaque)
0084 {
0085 u64 no_addr = *(u64 *)opaque;
0086
0087 GFS2_I(inode)->i_no_addr = no_addr;
0088 inode->i_ino = no_addr;
0089 return 0;
0090 }
0091
/**
 * gfs2_inode_lookup - Look up or create an in-core GFS2 inode
 * @sb: The super block
 * @type: The inode type (DT_UNKNOWN if not known by the caller)
 * @no_addr: The disk address of the inode (also used as the inode number)
 * @no_formal_ino: The inode generation number (0 if not known)
 * @blktype: The expected block type (GFS2_BLKST_FREE to skip the check)
 *
 * If @type is DT_UNKNOWN, the inode is read in from disk in order to
 * determine its type.  If @blktype is anything other than GFS2_BLKST_FREE,
 * the on-disk block type is verified against it, which requires taking the
 * inode glock exclusively.
 *
 * Returns: the VFS inode on success, or an ERR_PTR() on failure
 */
0114 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
0115 u64 no_addr, u64 no_formal_ino,
0116 unsigned int blktype)
0117 {
0118 struct inode *inode;
0119 struct gfs2_inode *ip;
0120 struct gfs2_holder i_gh;
0121 int error;
0122
0123 gfs2_holder_mark_uninitialized(&i_gh);
0124 inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
0125 if (!inode)
0126 return ERR_PTR(-ENOMEM);
0127
0128 ip = GFS2_I(inode);
0129
0130 if (inode->i_state & I_NEW) {
0131 struct gfs2_sbd *sdp = GFS2_SB(inode);
0132 struct gfs2_glock *io_gl;
0133
0134 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE,
0135 &ip->i_gl);
0136 if (unlikely(error))
0137 goto fail;
0138
0139 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE,
0140 &io_gl);
0141 if (unlikely(error))
0142 goto fail;
0143
0144 if (blktype != GFS2_BLKST_UNLINKED)
0145 gfs2_cancel_delete_work(io_gl);
0146 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT,
0147 &ip->i_iopen_gh);
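		/* The iopen holder took its own reference on io_gl, so drop ours. */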
0148 gfs2_glock_put(io_gl);
0149 if (unlikely(error))
0150 goto fail;
0151
0152 if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
			/*
			 * The GL_SKIP flag indicates to skip reading the inode
			 * block.  The inode is read in later, via
			 * gfs2_instantiate(), after the block type has been
			 * checked.
			 */
0158 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
0159 GL_SKIP, &i_gh);
0160 if (error)
0161 goto fail;
0162
0163 error = -ESTALE;
0164 if (no_formal_ino &&
0165 gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))
0166 goto fail;
0167
0168 if (blktype != GFS2_BLKST_FREE) {
0169 error = gfs2_check_blk_type(sdp, no_addr,
0170 blktype);
0171 if (error)
0172 goto fail;
0173 }
0174 }
0175
0176 set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);

		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
0179 inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
0180 inode->i_atime.tv_nsec = 0;
0181
0182 glock_set_object(ip->i_gl, ip);
0183
0184 if (type == DT_UNKNOWN) {
			/* Inode glock must be locked already */
0186 error = gfs2_instantiate(&i_gh);
0187 if (error) {
0188 glock_clear_object(ip->i_gl, ip);
0189 goto fail;
0190 }
0191 } else {
0192 ip->i_no_formal_ino = no_formal_ino;
0193 inode->i_mode = DT2IF(type);
0194 }
0195
0196 if (gfs2_holder_initialized(&i_gh))
0197 gfs2_glock_dq_uninit(&i_gh);
0198 glock_set_object(ip->i_iopen_gh.gh_gl, ip);
0199
0200 gfs2_set_iop(inode);
0201 unlock_new_inode(inode);
0202 }
0203
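	/*
	 * If the caller knows the generation number and it disagrees with
	 * the cached one, the inode found here is a stale re-use of the
	 * same block.
	 */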
0204 if (no_formal_ino && ip->i_no_formal_ino &&
0205 no_formal_ino != ip->i_no_formal_ino) {
0206 iput(inode);
0207 return ERR_PTR(-ESTALE);
0208 }
0209
0210 return inode;
0211
0212 fail:
0213 if (gfs2_holder_initialized(&ip->i_iopen_gh))
0214 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
0215 if (gfs2_holder_initialized(&i_gh))
0216 gfs2_glock_dq_uninit(&i_gh);
0217 iget_failed(inode);
0218 return ERR_PTR(error);
0219 }
0220
/**
 * gfs2_lookup_by_inum - Look up an inode by inode number
 * @sdp: The super block
 * @no_addr: The inode number
 * @no_formal_ino: The inode generation number (0 for any)
 * @blktype: The expected block type (see gfs2_inode_lookup)
 *
 * Returns: the VFS inode on success, or an ERR_PTR() on failure
 */
0228 struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
0229 u64 no_formal_ino, unsigned int blktype)
0230 {
0231 struct super_block *sb = sdp->sd_vfs;
0232 struct inode *inode;
0233 int error;
0234
0235 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, no_formal_ino,
0236 blktype);
0237 if (IS_ERR(inode))
0238 return inode;
0239
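	/*
	 * Callers that pass a formal inode number (e.g. NFS file handles)
	 * must not be given access to system files.
	 */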
0240 if (no_formal_ino) {
0241 error = -EIO;
0242 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
0243 goto fail_iput;
0244 }
0245 return inode;
0246
0247 fail_iput:
0248 iput(inode);
0249 return ERR_PTR(error);
0250 }
0251
0252
0253 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
0254 {
0255 struct qstr qstr;
0256 struct inode *inode;
0257 gfs2_str2qstr(&qstr, name);
0258 inode = gfs2_lookupi(dip, &qstr, 1);
	/*
	 * gfs2_lookupi has inconsistent callers: vfs
	 * related routines expect NULL for no entry found,
	 * gfs2_lookup_simple callers expect ENOENT
	 * and do not check for NULL.
	 */
0264 if (inode == NULL)
0265 return ERR_PTR(-ENOENT);
0266 else
0267 return inode;
0268 }
0269
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The inode of the directory containing the inode to look up
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 *
 * This can be called via the VFS filldir function when NFS is doing
 * a readdirplus and the inode which it is intending to stat isn't
 * already in cache.  In this case we must not take the directory glock
 * again, since the readdir call will have already taken that lock.
 *
 * Returns: the inode if found, NULL if not, or an ERR_PTR() on error
 */
0285 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
0286 int is_root)
0287 {
0288 struct super_block *sb = dir->i_sb;
0289 struct gfs2_inode *dip = GFS2_I(dir);
0290 struct gfs2_holder d_gh;
0291 int error = 0;
0292 struct inode *inode = NULL;
0293
0294 gfs2_holder_mark_uninitialized(&d_gh);
0295 if (!name->len || name->len > GFS2_FNAMESIZE)
0296 return ERR_PTR(-ENAMETOOLONG);
0297
0298 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
0299 (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
0300 dir == d_inode(sb->s_root))) {
0301 igrab(dir);
0302 return dir;
0303 }
0304
0305 if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) {
0306 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
0307 if (error)
0308 return ERR_PTR(error);
0309 }
0310
0311 if (!is_root) {
0312 error = gfs2_permission(&init_user_ns, dir, MAY_EXEC);
0313 if (error)
0314 goto out;
0315 }
0316
0317 inode = gfs2_dir_search(dir, name, false);
0318 if (IS_ERR(inode))
0319 error = PTR_ERR(inode);
0320 out:
0321 if (gfs2_holder_initialized(&d_gh))
0322 gfs2_glock_dq_uninit(&d_gh);
0323 if (error == -ENOENT)
0324 return NULL;
0325 return inode ? inode : ERR_PTR(error);
0326 }
0327
/**
 * create_ok - Check whether a new entry may be created in a directory
 * @dip: The directory in which the new inode is to be created
 * @name: The name of the new entry
 * @mode: The mode of the new inode
 *
 * Returns: 0 if creation is allowed, or a negative errno
 */
0337 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
0338 umode_t mode)
0339 {
0340 int error;
0341
0342 error = gfs2_permission(&init_user_ns, &dip->i_inode,
0343 MAY_WRITE | MAY_EXEC);
0344 if (error)
0345 return error;
0346
	/* Don't create entries in an unlinked directory */
0348 if (!dip->i_inode.i_nlink)
0349 return -ENOENT;
0350
0351 if (dip->i_entries == (u32)-1)
0352 return -EFBIG;
0353 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
0354 return -EMLINK;
0355
0356 return 0;
0357 }
0358
0359 static void munge_mode_uid_gid(const struct gfs2_inode *dip,
0360 struct inode *inode)
0361 {
0362 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
0363 (dip->i_inode.i_mode & S_ISUID) &&
0364 !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) {
0365 if (S_ISDIR(inode->i_mode))
0366 inode->i_mode |= S_ISUID;
0367 else if (!uid_eq(dip->i_inode.i_uid, current_fsuid()))
0368 inode->i_mode &= ~07111;
0369 inode->i_uid = dip->i_inode.i_uid;
0370 } else
0371 inode->i_uid = current_fsuid();
0372
0373 if (dip->i_inode.i_mode & S_ISGID) {
0374 if (S_ISDIR(inode->i_mode))
0375 inode->i_mode |= S_ISGID;
0376 inode->i_gid = dip->i_inode.i_gid;
0377 } else
0378 inode->i_gid = current_fsgid();
0379 }
0380
0381 static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
0382 {
0383 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
0384 struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, };
0385 int error;
0386
0387 error = gfs2_quota_lock_check(ip, &ap);
0388 if (error)
0389 goto out;
0390
0391 error = gfs2_inplace_reserve(ip, &ap);
0392 if (error)
0393 goto out_quota;
0394
0395 error = gfs2_trans_begin(sdp, (*dblocks * RES_RG_BIT) + RES_STATFS + RES_QUOTA, 0);
0396 if (error)
0397 goto out_ipreserv;
0398
0399 error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
0400 ip->i_no_formal_ino = ip->i_generation;
0401 ip->i_inode.i_ino = ip->i_no_addr;
0402 ip->i_goal = ip->i_no_addr;
0403
0404 gfs2_trans_end(sdp);
0405
0406 out_ipreserv:
0407 gfs2_inplace_release(ip);
0408 out_quota:
0409 gfs2_quota_unlock(ip);
0410 out:
0411 return error;
0412 }
0413
0414 static void gfs2_init_dir(struct buffer_head *dibh,
0415 const struct gfs2_inode *parent)
0416 {
0417 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
0418 struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
0419
0420 gfs2_qstr2dirent(&gfs2_qdot, GFS2_DIRENT_SIZE(gfs2_qdot.len), dent);
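	/* di_num is already in on-disk byte order, so it can be copied directly. */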
0421 dent->de_inum = di->di_num;
0422 dent->de_type = cpu_to_be16(DT_DIR);
0423
0424 dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
0425 gfs2_qstr2dirent(&gfs2_qdotdot, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
0426 gfs2_inum_out(parent, dent);
0427 dent->de_type = cpu_to_be16(DT_DIR);
0428
0429 }
0430
/**
 * gfs2_init_xattr - Initialise an empty extended attribute block
 * @ip: The inode whose xattr block is being set up
 *
 * Writes a single unused xattr header spanning the whole block, so that
 * ACLs and security labels can be added right after creation.
 */
0439 static void gfs2_init_xattr(struct gfs2_inode *ip)
0440 {
0441 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
0442 struct buffer_head *bh;
0443 struct gfs2_ea_header *ea;
0444
0445 bh = gfs2_meta_new(ip->i_gl, ip->i_eattr);
0446 gfs2_trans_add_meta(ip->i_gl, bh);
0447 gfs2_metatype_set(bh, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
0448 gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
0449
0450 ea = GFS2_EA_BH2FIRST(bh);
0451 ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
0452 ea->ea_type = GFS2_EATYPE_UNUSED;
0453 ea->ea_flags = GFS2_EAFLAG_LAST;
0454
0455 brelse(bh);
0456 }
0457
/**
 * init_dinode - Fill in a new dinode structure
 * @dip: The directory this inode is being created in
 * @ip: The inode
 * @symname: The symlink destination (only used for symlinks)
 */
0466 static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
0467 const char *symname)
0468 {
0469 struct gfs2_dinode *di;
0470 struct buffer_head *dibh;
0471
0472 dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
0473 gfs2_trans_add_meta(ip->i_gl, dibh);
0474 di = (struct gfs2_dinode *)dibh->b_data;
0475 gfs2_dinode_out(ip, di);
0476
0477 di->di_major = cpu_to_be32(imajor(&ip->i_inode));
0478 di->di_minor = cpu_to_be32(iminor(&ip->i_inode));
0479 di->__pad1 = 0;
0480 di->__pad2 = 0;
0481 di->__pad3 = 0;
0482 memset(&di->__pad4, 0, sizeof(di->__pad4));
0483 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
0484 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
0485
0486 switch(ip->i_inode.i_mode & S_IFMT) {
0487 case S_IFDIR:
0488 gfs2_init_dir(dibh, dip);
0489 break;
0490 case S_IFLNK:
0491 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size);
0492 break;
0493 }
0494
0495 set_buffer_uptodate(dibh);
0496 brelse(dibh);
0497 }
0498
/**
 * gfs2_trans_da_blks - Calculate the transaction reservation for a link
 * @dip: The directory we are linking into
 * @da: The dir add information
 * @nr_inodes: The number of inodes involved
 *
 * Returns: the number of blocks to reserve in the transaction
 */
0513 static unsigned gfs2_trans_da_blks(const struct gfs2_inode *dip,
0514 const struct gfs2_diradd *da,
0515 unsigned nr_inodes)
0516 {
0517 return da->nr_blocks + gfs2_rg_blocks(dip, da->nr_blocks) +
0518 (nr_inodes * RES_DINODE) + RES_QUOTA + RES_STATFS;
0519 }
0520
0521 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
0522 struct gfs2_inode *ip, struct gfs2_diradd *da)
0523 {
0524 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
0525 struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };
0526 int error;
0527
0528 if (da->nr_blocks) {
0529 error = gfs2_quota_lock_check(dip, &ap);
0530 if (error)
0531 goto fail_quota_locks;
0532
0533 error = gfs2_inplace_reserve(dip, &ap);
0534 if (error)
0535 goto fail_quota_locks;
0536
0537 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
0538 if (error)
0539 goto fail_ipreserv;
0540 } else {
0541 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
0542 if (error)
0543 goto fail_quota_locks;
0544 }
0545
0546 error = gfs2_dir_add(&dip->i_inode, name, ip, da);
0547
0548 gfs2_trans_end(sdp);
0549 fail_ipreserv:
0550 gfs2_inplace_release(dip);
0551 fail_quota_locks:
0552 gfs2_quota_unlock(dip);
0553 return error;
0554 }
0555
0556 static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array,
0557 void *fs_info)
0558 {
0559 const struct xattr *xattr;
0560 int err = 0;
0561
0562 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
0563 err = __gfs2_xattr_set(inode, xattr->name, xattr->value,
0564 xattr->value_len, 0,
0565 GFS2_EATYPE_SECURITY);
0566 if (err < 0)
0567 break;
0568 }
0569 return err;
0570 }
0571
/**
 * gfs2_create_inode - Create a new inode
 * @dir: The parent directory
 * @dentry: The new dentry
 * @file: If non-NULL, the file which is being opened
 * @mode: The permissions on the new inode
 * @dev: For device nodes, this is the device number
 * @symname: For symlinks, this is the link destination
 * @size: The initial size of the inode (ignored for directories)
 * @excl: Force fail if the inode already exists
 *
 * Returns: 0 on success, or error code
 */
0586 static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
0587 struct file *file,
0588 umode_t mode, dev_t dev, const char *symname,
0589 unsigned int size, int excl)
0590 {
0591 const struct qstr *name = &dentry->d_name;
0592 struct posix_acl *default_acl, *acl;
0593 struct gfs2_holder ghs[2];
0594 struct inode *inode = NULL;
0595 struct gfs2_inode *dip = GFS2_I(dir), *ip;
0596 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
0597 struct gfs2_glock *io_gl;
0598 int error, free_vfs_inode = 1;
0599 u32 aflags = 0;
0600 unsigned blocks = 1;
0601 struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
0602
0603 if (!name->len || name->len > GFS2_FNAMESIZE)
0604 return -ENAMETOOLONG;
0605
0606 error = gfs2_qa_get(dip);
0607 if (error)
0608 return error;
0609
0610 error = gfs2_rindex_update(sdp);
0611 if (error)
0612 goto fail;
0613
0614 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
0615 if (error)
0616 goto fail;
0617 gfs2_holder_mark_uninitialized(ghs + 1);
0618
0619 error = create_ok(dip, name, mode);
0620 if (error)
0621 goto fail_gunlock;
0622
0623 inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl);
0624 error = PTR_ERR(inode);
0625 if (!IS_ERR(inode)) {
0626 if (S_ISDIR(inode->i_mode)) {
0627 iput(inode);
0628 inode = ERR_PTR(-EISDIR);
0629 goto fail_gunlock;
0630 }
0631 d_instantiate(dentry, inode);
0632 error = 0;
0633 if (file) {
0634 if (S_ISREG(inode->i_mode))
0635 error = finish_open(file, dentry, gfs2_open_common);
0636 else
0637 error = finish_no_open(file, NULL);
0638 }
0639 gfs2_glock_dq_uninit(ghs);
0640 goto fail;
0641 } else if (error != -ENOENT) {
0642 goto fail_gunlock;
0643 }
0644
0645 error = gfs2_diradd_alloc_required(dir, name, &da);
0646 if (error < 0)
0647 goto fail_gunlock;
0648
0649 inode = new_inode(sdp->sd_vfs);
0650 error = -ENOMEM;
0651 if (!inode)
0652 goto fail_gunlock;
0653
0654 error = posix_acl_create(dir, &mode, &default_acl, &acl);
0655 if (error)
0656 goto fail_gunlock;
0657
0658 ip = GFS2_I(inode);
0659 error = gfs2_qa_get(ip);
0660 if (error)
0661 goto fail_free_acls;
0662
0663 inode->i_mode = mode;
0664 set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
0665 inode->i_rdev = dev;
0666 inode->i_size = size;
0667 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
0668 munge_mode_uid_gid(dip, inode);
0669 check_and_update_goal(dip);
0670 ip->i_goal = dip->i_goal;
0671 ip->i_diskflags = 0;
0672 ip->i_eattr = 0;
0673 ip->i_height = 0;
0674 ip->i_depth = 0;
0675 ip->i_entries = 0;
0676 ip->i_no_addr = 0;
0677
0678 switch(mode & S_IFMT) {
0679 case S_IFREG:
0680 if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
0681 gfs2_tune_get(sdp, gt_new_files_jdata))
0682 ip->i_diskflags |= GFS2_DIF_JDATA;
0683 gfs2_set_aops(inode);
0684 break;
0685 case S_IFDIR:
0686 ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA);
0687 ip->i_diskflags |= GFS2_DIF_JDATA;
0688 ip->i_entries = 2;
0689 break;
0690 }
0691
	/* Force SYSTEM flag on all files and subdirs of a SYSTEM directory */
0693 if (dip->i_diskflags & GFS2_DIF_SYSTEM)
0694 ip->i_diskflags |= GFS2_DIF_SYSTEM;
0695
0696 gfs2_set_inode_flags(inode);
0697
0698 if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) ||
0699 (dip->i_diskflags & GFS2_DIF_TOPDIR))
0700 aflags |= GFS2_AF_ORLOV;
0701
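	/* Any initial ACLs will live in an extra extended attribute block. */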
0702 if (default_acl || acl)
0703 blocks++;
0704
0705 error = alloc_dinode(ip, aflags, &blocks);
0706 if (error)
0707 goto fail_free_inode;
0708
0709 gfs2_set_inode_blocks(inode, blocks);
0710
0711 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
0712 if (error)
0713 goto fail_free_inode;
0714
0715 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
0716 if (error)
0717 goto fail_free_inode;
0718 gfs2_cancel_delete_work(io_gl);
0719
0720 error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
0721 BUG_ON(error);
0722
0723 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
0724 if (error)
0725 goto fail_gunlock2;
0726
0727 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
0728 if (error)
0729 goto fail_gunlock3;
0730
0731 error = gfs2_trans_begin(sdp, blocks, 0);
0732 if (error)
0733 goto fail_gunlock3;
0734
0735 if (blocks > 1) {
0736 ip->i_eattr = ip->i_no_addr + 1;
0737 gfs2_init_xattr(ip);
0738 }
0739 init_dinode(dip, ip, symname);
0740 gfs2_trans_end(sdp);
0741
0742 glock_set_object(ip->i_gl, ip);
0743 glock_set_object(io_gl, ip);
0744 gfs2_set_iop(inode);
0745
0746 free_vfs_inode = 0;
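	/* From this point on, failures must undo the on-disk GFS2 structures
	 * rather than simply freeing the VFS inode. */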
0747
0748
0749 if (default_acl) {
0750 error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
0751 if (error)
0752 goto fail_gunlock4;
0753 posix_acl_release(default_acl);
0754 default_acl = NULL;
0755 }
0756 if (acl) {
0757 error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
0758 if (error)
0759 goto fail_gunlock4;
0760 posix_acl_release(acl);
0761 acl = NULL;
0762 }
0763
0764 error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
0765 &gfs2_initxattrs, NULL);
0766 if (error)
0767 goto fail_gunlock4;
0768
0769 error = link_dinode(dip, name, ip, &da);
0770 if (error)
0771 goto fail_gunlock4;
0772
0773 mark_inode_dirty(inode);
0774 d_instantiate(dentry, inode);

	/* After instantiate, errors should result in evict which will destroy
	 * both inode and iopen glocks properly. */
0777 if (file) {
0778 file->f_mode |= FMODE_CREATED;
0779 error = finish_open(file, dentry, gfs2_open_common);
0780 }
0781 gfs2_glock_dq_uninit(ghs);
0782 gfs2_qa_put(ip);
0783 gfs2_glock_dq_uninit(ghs + 1);
0784 gfs2_glock_put(io_gl);
0785 gfs2_qa_put(dip);
0786 unlock_new_inode(inode);
0787 return error;
0788
0789 fail_gunlock4:
0790 glock_clear_object(ip->i_gl, ip);
0791 glock_clear_object(io_gl, ip);
0792 fail_gunlock3:
0793 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
0794 fail_gunlock2:
0795 gfs2_glock_put(io_gl);
0796 fail_free_inode:
0797 if (ip->i_gl) {
0798 if (free_vfs_inode)
0799 gfs2_glock_put(ip->i_gl);
0800 }
0801 gfs2_rs_deltree(&ip->i_res);
0802 gfs2_qa_put(ip);
0803 fail_free_acls:
0804 posix_acl_release(default_acl);
0805 posix_acl_release(acl);
0806 fail_gunlock:
0807 gfs2_dir_no_add(&da);
0808 gfs2_glock_dq_uninit(ghs);
0809 if (!IS_ERR_OR_NULL(inode)) {
0810 clear_nlink(inode);
0811 if (!free_vfs_inode)
0812 mark_inode_dirty(inode);
0813 set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
0814 &GFS2_I(inode)->i_flags);
0815 if (inode->i_state & I_NEW)
0816 iget_failed(inode);
0817 else
0818 iput(inode);
0819 }
0820 if (gfs2_holder_initialized(ghs + 1))
0821 gfs2_glock_dq_uninit(ghs + 1);
0822 fail:
0823 gfs2_qa_put(dip);
0824 return error;
0825 }
0826
/**
 * gfs2_create - Create a regular file
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dir: The directory in which to create the file
 * @dentry: The dentry of the new file
 * @mode: The mode of the new file
 * @excl: Force fail if the file already exists
 *
 * Returns: errno
 */
0838 static int gfs2_create(struct user_namespace *mnt_userns, struct inode *dir,
0839 struct dentry *dentry, umode_t mode, bool excl)
0840 {
0841 return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl);
0842 }
0843
/**
 * __gfs2_lookup - Look up a filename in a directory and return its inode
 * @dir: The directory inode
 * @dentry: The dentry of the new inode
 * @file: File to be opened (may be NULL)
 *
 * Returns: the dentry to use (possibly NULL), or an ERR_PTR() on error
 */
0854 static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
0855 struct file *file)
0856 {
0857 struct inode *inode;
0858 struct dentry *d;
0859 struct gfs2_holder gh;
0860 struct gfs2_glock *gl;
0861 int error;
0862
0863 inode = gfs2_lookupi(dir, &dentry->d_name, 0);
0864 if (inode == NULL) {
0865 d_add(dentry, NULL);
0866 return NULL;
0867 }
0868 if (IS_ERR(inode))
0869 return ERR_CAST(inode);
0870
0871 gl = GFS2_I(inode)->i_gl;
0872 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
0873 if (error) {
0874 iput(inode);
0875 return ERR_PTR(error);
0876 }
0877
0878 d = d_splice_alias(inode, dentry);
0879 if (IS_ERR(d)) {
0880 gfs2_glock_dq_uninit(&gh);
0881 return d;
0882 }
0883 if (file && S_ISREG(inode->i_mode))
0884 error = finish_open(file, dentry, gfs2_open_common);
0885
0886 gfs2_glock_dq_uninit(&gh);
0887 if (error) {
0888 dput(d);
0889 return ERR_PTR(error);
0890 }
0891 return d;
0892 }
0893
0894 static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
0895 unsigned flags)
0896 {
0897 return __gfs2_lookup(dir, dentry, NULL);
0898 }
0899
/**
 * gfs2_link - Link to a file
 * @old_dentry: The inode to link
 * @dir: Add link to this directory
 * @dentry: The name of the link
 *
 * Link the inode in "old_dentry" into the directory "dir" with the
 * name in "dentry".
 *
 * Returns: errno
 */
0912 static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
0913 struct dentry *dentry)
0914 {
0915 struct gfs2_inode *dip = GFS2_I(dir);
0916 struct gfs2_sbd *sdp = GFS2_SB(dir);
0917 struct inode *inode = d_inode(old_dentry);
0918 struct gfs2_inode *ip = GFS2_I(inode);
0919 struct gfs2_holder ghs[2];
0920 struct buffer_head *dibh;
0921 struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, };
0922 int error;
0923
0924 if (S_ISDIR(inode->i_mode))
0925 return -EPERM;
0926
0927 error = gfs2_qa_get(dip);
0928 if (error)
0929 return error;
0930
0931 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
0932 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
0933
0934 error = gfs2_glock_nq(ghs);
0935 if (error)
0936 goto out_parent;
0937
0938 error = gfs2_glock_nq(ghs + 1);
0939 if (error)
0940 goto out_child;
0941
0942 error = -ENOENT;
0943 if (inode->i_nlink == 0)
0944 goto out_gunlock;
0945
0946 error = gfs2_permission(&init_user_ns, dir, MAY_WRITE | MAY_EXEC);
0947 if (error)
0948 goto out_gunlock;
0949
0950 error = gfs2_dir_check(dir, &dentry->d_name, NULL);
0951 switch (error) {
0952 case -ENOENT:
0953 break;
0954 case 0:
0955 error = -EEXIST;
0956 goto out_gunlock;
0957 default:
0958 goto out_gunlock;
0959 }
0960
0961 error = -EINVAL;
0962 if (!dip->i_inode.i_nlink)
0963 goto out_gunlock;
0964 error = -EFBIG;
0965 if (dip->i_entries == (u32)-1)
0966 goto out_gunlock;
0967 error = -EPERM;
0968 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
0969 goto out_gunlock;
0970 error = -EINVAL;
0971 if (!ip->i_inode.i_nlink)
0972 goto out_gunlock;
0973 error = -EMLINK;
0974 if (ip->i_inode.i_nlink == (u32)-1)
0975 goto out_gunlock;
0976
0977 error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da);
0978 if (error < 0)
0979 goto out_gunlock;
0980
0981 if (da.nr_blocks) {
0982 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
0983 error = gfs2_quota_lock_check(dip, &ap);
0984 if (error)
0985 goto out_gunlock;
0986
0987 error = gfs2_inplace_reserve(dip, &ap);
0988 if (error)
0989 goto out_gunlock_q;
0990
0991 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
0992 if (error)
0993 goto out_ipres;
0994 } else {
0995 error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
0996 if (error)
0997 goto out_ipres;
0998 }
0999
1000 error = gfs2_meta_inode_buffer(ip, &dibh);
1001 if (error)
1002 goto out_end_trans;
1003
1004 error = gfs2_dir_add(dir, &dentry->d_name, ip, &da);
1005 if (error)
1006 goto out_brelse;
1007
1008 gfs2_trans_add_meta(ip->i_gl, dibh);
1009 inc_nlink(&ip->i_inode);
1010 ip->i_inode.i_ctime = current_time(&ip->i_inode);
1011 ihold(inode);
1012 d_instantiate(dentry, inode);
1013 mark_inode_dirty(inode);
1014
1015 out_brelse:
1016 brelse(dibh);
1017 out_end_trans:
1018 gfs2_trans_end(sdp);
1019 out_ipres:
1020 if (da.nr_blocks)
1021 gfs2_inplace_release(dip);
1022 out_gunlock_q:
1023 if (da.nr_blocks)
1024 gfs2_quota_unlock(dip);
1025 out_gunlock:
1026 gfs2_dir_no_add(&da);
1027 gfs2_glock_dq(ghs + 1);
1028 out_child:
1029 gfs2_glock_dq(ghs);
1030 out_parent:
1031 gfs2_qa_put(dip);
1032 gfs2_holder_uninit(ghs);
1033 gfs2_holder_uninit(ghs + 1);
1034 return error;
1035 }
1036
/*
 * gfs2_unlink_ok - check to see that an inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */
1048 static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1049 const struct gfs2_inode *ip)
1050 {
1051 int error;
1052
1053 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1054 return -EPERM;
1055
1056 if ((dip->i_inode.i_mode & S_ISVTX) &&
1057 !uid_eq(dip->i_inode.i_uid, current_fsuid()) &&
1058 !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER))
1059 return -EPERM;
1060
1061 if (IS_APPEND(&dip->i_inode))
1062 return -EPERM;
1063
1064 error = gfs2_permission(&init_user_ns, &dip->i_inode,
1065 MAY_WRITE | MAY_EXEC);
1066 if (error)
1067 return error;
1068
1069 return gfs2_dir_check(&dip->i_inode, name, ip);
1070 }
1071
/**
 * gfs2_unlink_inode - Remove a directory entry and unlink the inode
 * @dip: The parent directory
 * @dentry: The dentry to unlink
 *
 * Called with all the locks held and inside a transaction.  For a
 * directory this is only called after it has been checked to be empty.
 *
 * Returns: 0 on success, or an error
 */
1083 static int gfs2_unlink_inode(struct gfs2_inode *dip,
1084 const struct dentry *dentry)
1085 {
1086 struct inode *inode = d_inode(dentry);
1087 struct gfs2_inode *ip = GFS2_I(inode);
1088 int error;
1089
1090 error = gfs2_dir_del(dip, dentry);
1091 if (error)
1092 return error;
1093
1094 ip->i_entries = 0;
1095 inode->i_ctime = current_time(inode);
1096 if (S_ISDIR(inode->i_mode))
1097 clear_nlink(inode);
1098 else
1099 drop_nlink(inode);
1100 mark_inode_dirty(inode);
1101 if (inode->i_nlink == 0)
1102 gfs2_unlink_di(inode);
1103 return 0;
1104 }
1105
/**
 * gfs2_unlink - Unlink an inode (this does rmdir as well)
 * @dir: The inode of the directory containing the inode to unlink
 * @dentry: The file itself
 *
 * This routine uses the type of the inode as a flag to figure out
 * whether this is an unlink or an rmdir.
 *
 * Returns: errno
 */
1118 static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
1119 {
1120 struct gfs2_inode *dip = GFS2_I(dir);
1121 struct gfs2_sbd *sdp = GFS2_SB(dir);
1122 struct inode *inode = d_inode(dentry);
1123 struct gfs2_inode *ip = GFS2_I(inode);
1124 struct gfs2_holder ghs[3];
1125 struct gfs2_rgrpd *rgd;
1126 int error;
1127
1128 error = gfs2_rindex_update(sdp);
1129 if (error)
1130 return error;
1131
1132 error = -EROFS;
1133
1134 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
1135 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
1136
1137 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1138 if (!rgd)
1139 goto out_inodes;
1140
1141 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
1142
1143
1144 error = gfs2_glock_nq(ghs);
1145 if (error)
1146 goto out_parent;
1147
1148 error = gfs2_glock_nq(ghs + 1);
1149 if (error)
1150 goto out_child;
1151
1152 error = -ENOENT;
1153 if (inode->i_nlink == 0)
1154 goto out_rgrp;
1155
1156 if (S_ISDIR(inode->i_mode)) {
1157 error = -ENOTEMPTY;
1158 if (ip->i_entries > 2 || inode->i_nlink > 2)
1159 goto out_rgrp;
1160 }
1161
1162 error = gfs2_glock_nq(ghs + 2);
1163 if (error)
1164 goto out_rgrp;
1165
1166 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
1167 if (error)
1168 goto out_gunlock;
1169
1170 error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
1171 if (error)
1172 goto out_gunlock;
1173
1174 error = gfs2_unlink_inode(dip, dentry);
1175 gfs2_trans_end(sdp);
1176
1177 out_gunlock:
1178 gfs2_glock_dq(ghs + 2);
1179 out_rgrp:
1180 gfs2_glock_dq(ghs + 1);
1181 out_child:
1182 gfs2_glock_dq(ghs);
1183 out_parent:
1184 gfs2_holder_uninit(ghs + 2);
1185 out_inodes:
1186 gfs2_holder_uninit(ghs + 1);
1187 gfs2_holder_uninit(ghs);
1188 return error;
1189 }
1190
/**
 * gfs2_symlink - Create a symlink
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dir: The directory to create the symlink in
 * @dentry: The dentry to put the symlink in
 * @symname: The thing which the link points to
 *
 * Returns: errno
 */
1201 static int gfs2_symlink(struct user_namespace *mnt_userns, struct inode *dir,
1202 struct dentry *dentry, const char *symname)
1203 {
1204 unsigned int size;
1205
1206 size = strlen(symname);
1207 if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
1208 return -ENAMETOOLONG;
1209
1210 return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0);
1211 }
1212
/**
 * gfs2_mkdir - Make a directory
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dir: The parent directory of the new one
 * @dentry: The dentry of the new directory
 * @mode: The mode of the new directory
 *
 * Returns: errno
 */
1223 static int gfs2_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
1224 struct dentry *dentry, umode_t mode)
1225 {
1226 unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
1227 return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
1228 }
1229
/**
 * gfs2_mknod - Make a special file
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dir: The directory in which the special file will reside
 * @dentry: The dentry of the special file
 * @mode: The mode of the special file
 * @dev: The device specification of the special file
 *
 * Returns: errno
 */
1240 static int gfs2_mknod(struct user_namespace *mnt_userns, struct inode *dir,
1241 struct dentry *dentry, umode_t mode, dev_t dev)
1242 {
1243 return gfs2_create_inode(dir, dentry, NULL, mode, dev, NULL, 0, 0);
1244 }
1245
/**
 * gfs2_atomic_open - Atomically open a file
 * @dir: The directory
 * @dentry: The proposed new entry
 * @file: The proposed new struct file
 * @flags: open flags
 * @mode: File mode
 *
 * Returns: error code or 0 for success
 */
1257 static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
1258 struct file *file, unsigned flags,
1259 umode_t mode)
1260 {
1261 struct dentry *d;
1262 bool excl = !!(flags & O_EXCL);
1263
1264 if (!d_in_lookup(dentry))
1265 goto skip_lookup;
1266
1267 d = __gfs2_lookup(dir, dentry, file);
1268 if (IS_ERR(d))
1269 return PTR_ERR(d);
1270 if (d != NULL)
1271 dentry = d;
1272 if (d_really_is_positive(dentry)) {
1273 if (!(file->f_mode & FMODE_OPENED))
1274 return finish_no_open(file, d);
1275 dput(d);
1276 return excl && (flags & O_CREAT) ? -EEXIST : 0;
1277 }
1278
1279 BUG_ON(d != NULL);
1280
1281 skip_lookup:
1282 if (!(flags & O_CREAT))
1283 return -ENOENT;
1284
1285 return gfs2_create_inode(dir, dentry, file, S_IFREG | mode, 0, NULL, 0, excl);
1286 }
1287
/*
 * gfs2_ok_to_move - check if it's ok to move a pathname to another pathname
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this.
 * Assumes we already hold the rename lock.
 *
 * Returns: errno
 */
1299 static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1300 {
1301 struct inode *dir = &to->i_inode;
1302 struct super_block *sb = dir->i_sb;
1303 struct inode *tmp;
1304 int error = 0;
1305
1306 igrab(dir);
1307
1308 for (;;) {
1309 if (dir == &this->i_inode) {
1310 error = -EINVAL;
1311 break;
1312 }
1313 if (dir == d_inode(sb->s_root)) {
1314 error = 0;
1315 break;
1316 }
1317
1318 tmp = gfs2_lookupi(dir, &gfs2_qdotdot, 1);
1319 if (!tmp) {
1320 error = -ENOENT;
1321 break;
1322 }
1323 if (IS_ERR(tmp)) {
1324 error = PTR_ERR(tmp);
1325 break;
1326 }
1327
1328 iput(dir);
1329 dir = tmp;
1330 }
1331
1332 iput(dir);
1333
1334 return error;
1335 }
1336
/**
 * update_moved_ino - Update an inode that's being moved
 * @ip: The inode being moved
 * @ndip: The parent directory of the new filename
 * @dir_rename: True if @ip is a directory
 *
 * Returns: errno
 */
1346 static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip,
1347 int dir_rename)
1348 {
1349 if (dir_rename)
1350 return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
1351
1352 ip->i_inode.i_ctime = current_time(&ip->i_inode);
1353 mark_inode_dirty_sync(&ip->i_inode);
1354 return 0;
1355 }
1356
/**
 * gfs2_rename - Rename a file
 * @odir: Parent directory of old file name
 * @odentry: The old dentry of the file
 * @ndir: Parent directory of new file name
 * @ndentry: The new dentry of the file
 *
 * Returns: errno
 */
1368 static int gfs2_rename(struct inode *odir, struct dentry *odentry,
1369 struct inode *ndir, struct dentry *ndentry)
1370 {
1371 struct gfs2_inode *odip = GFS2_I(odir);
1372 struct gfs2_inode *ndip = GFS2_I(ndir);
1373 struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
1374 struct gfs2_inode *nip = NULL;
1375 struct gfs2_sbd *sdp = GFS2_SB(odir);
1376 struct gfs2_holder ghs[4], r_gh, rd_gh;
1377 struct gfs2_rgrpd *nrgd;
1378 unsigned int num_gh;
1379 int dir_rename = 0;
1380 struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, };
1381 unsigned int x;
1382 int error;
1383
1384 gfs2_holder_mark_uninitialized(&r_gh);
1385 gfs2_holder_mark_uninitialized(&rd_gh);
1386 if (d_really_is_positive(ndentry)) {
1387 nip = GFS2_I(d_inode(ndentry));
1388 if (ip == nip)
1389 return 0;
1390 }
1391
1392 error = gfs2_rindex_update(sdp);
1393 if (error)
1394 return error;
1395
1396 error = gfs2_qa_get(ndip);
1397 if (error)
1398 return error;
1399
1400 if (odip != ndip) {
1401 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
1402 0, &r_gh);
1403 if (error)
1404 goto out;
1405
1406 if (S_ISDIR(ip->i_inode.i_mode)) {
1407 dir_rename = 1;
1408
1409 error = gfs2_ok_to_move(ip, ndip);
1410 if (error)
1411 goto out_gunlock_r;
1412 }
1413 }
1414
1415 num_gh = 1;
1416 gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
1417 if (odip != ndip) {
1418 gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE,GL_ASYNC,
1419 ghs + num_gh);
1420 num_gh++;
1421 }
1422 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1423 num_gh++;
1424
1425 if (nip) {
1426 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
1427 ghs + num_gh);
1428 num_gh++;
1429 }
1430
1431 for (x = 0; x < num_gh; x++) {
1432 error = gfs2_glock_nq(ghs + x);
1433 if (error)
1434 goto out_gunlock;
1435 }
1436 error = gfs2_glock_async_wait(num_gh, ghs);
1437 if (error)
1438 goto out_gunlock;
1439
1440 if (nip) {
		/* Grab the resource group glock for unlink flag twiddling.
		 * This is the case where the target dinode already exists
		 * so we unlink before doing the rename.
		 */
1445 nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1);
1446 if (!nrgd) {
1447 error = -ENOENT;
1448 goto out_gunlock;
1449 }
1450 error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
1451 LM_FLAG_NODE_SCOPE, &rd_gh);
1452 if (error)
1453 goto out_gunlock;
1454 }
1455
1456 error = -ENOENT;
1457 if (ip->i_inode.i_nlink == 0)
1458 goto out_gunlock;
1459
	/* Check out the old directory */
1461
1462 error = gfs2_unlink_ok(odip, &odentry->d_name, ip);
1463 if (error)
1464 goto out_gunlock;
1465
	/* Check out the new directory */
1467
1468 if (nip) {
1469 error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
1470 if (error)
1471 goto out_gunlock;
1472
1473 if (nip->i_inode.i_nlink == 0) {
1474 error = -EAGAIN;
1475 goto out_gunlock;
1476 }
1477
1478 if (S_ISDIR(nip->i_inode.i_mode)) {
1479 if (nip->i_entries < 2) {
1480 gfs2_consist_inode(nip);
1481 error = -EIO;
1482 goto out_gunlock;
1483 }
1484 if (nip->i_entries > 2) {
1485 error = -ENOTEMPTY;
1486 goto out_gunlock;
1487 }
1488 }
1489 } else {
1490 error = gfs2_permission(&init_user_ns, ndir,
1491 MAY_WRITE | MAY_EXEC);
1492 if (error)
1493 goto out_gunlock;
1494
1495 error = gfs2_dir_check(ndir, &ndentry->d_name, NULL);
1496 switch (error) {
1497 case -ENOENT:
1498 error = 0;
1499 break;
1500 case 0:
1501 error = -EEXIST;
1502 goto out_gunlock;
1503 default:
1504 goto out_gunlock;
1505 }
1506
1507 if (odip != ndip) {
1508 if (!ndip->i_inode.i_nlink) {
1509 error = -ENOENT;
1510 goto out_gunlock;
1511 }
1512 if (ndip->i_entries == (u32)-1) {
1513 error = -EFBIG;
1514 goto out_gunlock;
1515 }
1516 if (S_ISDIR(ip->i_inode.i_mode) &&
1517 ndip->i_inode.i_nlink == (u32)-1) {
1518 error = -EMLINK;
1519 goto out_gunlock;
1520 }
1521 }
1522 }
1523
	/* Check out the dir to be renamed */
1525
1526 if (dir_rename) {
1527 error = gfs2_permission(&init_user_ns, d_inode(odentry),
1528 MAY_WRITE);
1529 if (error)
1530 goto out_gunlock;
1531 }
1532
1533 if (nip == NULL) {
1534 error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name, &da);
1535 if (error)
1536 goto out_gunlock;
1537 }
1538
1539 if (da.nr_blocks) {
1540 struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };
1541 error = gfs2_quota_lock_check(ndip, &ap);
1542 if (error)
1543 goto out_gunlock;
1544
1545 error = gfs2_inplace_reserve(ndip, &ap);
1546 if (error)
1547 goto out_gunlock_q;
1548
1549 error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(ndip, &da, 4) +
1550 4 * RES_LEAF + 4, 0);
1551 if (error)
1552 goto out_ipreserv;
1553 } else {
1554 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
1555 5 * RES_LEAF + 4, 0);
1556 if (error)
1557 goto out_gunlock;
1558 }
1559
	/* Remove the target file, if it exists */
1561
1562 if (nip)
1563 error = gfs2_unlink_inode(ndip, ndentry);
1564
1565 error = update_moved_ino(ip, ndip, dir_rename);
1566 if (error)
1567 goto out_end_trans;
1568
1569 error = gfs2_dir_del(odip, odentry);
1570 if (error)
1571 goto out_end_trans;
1572
1573 error = gfs2_dir_add(ndir, &ndentry->d_name, ip, &da);
1574 if (error)
1575 goto out_end_trans;
1576
1577 out_end_trans:
1578 gfs2_trans_end(sdp);
1579 out_ipreserv:
1580 if (da.nr_blocks)
1581 gfs2_inplace_release(ndip);
1582 out_gunlock_q:
1583 if (da.nr_blocks)
1584 gfs2_quota_unlock(ndip);
1585 out_gunlock:
1586 gfs2_dir_no_add(&da);
1587 if (gfs2_holder_initialized(&rd_gh))
1588 gfs2_glock_dq_uninit(&rd_gh);
1589
1590 while (x--) {
1591 if (gfs2_holder_queued(ghs + x))
1592 gfs2_glock_dq(ghs + x);
1593 gfs2_holder_uninit(ghs + x);
1594 }
1595 out_gunlock_r:
1596 if (gfs2_holder_initialized(&r_gh))
1597 gfs2_glock_dq_uninit(&r_gh);
1598 out:
1599 gfs2_qa_put(ndip);
1600 return error;
1601 }
1602
/**
 * gfs2_exchange - exchange two files
 * @odir: Parent directory of old file name
 * @odentry: The old dentry of the file
 * @ndir: Parent directory of new file name
 * @ndentry: The new dentry of the file
 * @flags: The rename flags
 *
 * Returns: errno
 */
1614 static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
1615 struct inode *ndir, struct dentry *ndentry,
1616 unsigned int flags)
1617 {
1618 struct gfs2_inode *odip = GFS2_I(odir);
1619 struct gfs2_inode *ndip = GFS2_I(ndir);
1620 struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
1621 struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
1622 struct gfs2_sbd *sdp = GFS2_SB(odir);
1623 struct gfs2_holder ghs[4], r_gh;
1624 unsigned int num_gh;
1625 unsigned int x;
1626 umode_t old_mode = oip->i_inode.i_mode;
1627 umode_t new_mode = nip->i_inode.i_mode;
1628 int error;
1629
1630 gfs2_holder_mark_uninitialized(&r_gh);
1631 error = gfs2_rindex_update(sdp);
1632 if (error)
1633 return error;
1634
1635 if (odip != ndip) {
1636 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
1637 0, &r_gh);
1638 if (error)
1639 goto out;
1640
1641 if (S_ISDIR(old_mode)) {
			/* Don't move a directory into its own subdirectory */
1643 error = gfs2_ok_to_move(oip, ndip);
1644 if (error)
1645 goto out_gunlock_r;
1646 }
1647
1648 if (S_ISDIR(new_mode)) {
			/* Don't move a directory into its own subdirectory */
1650 error = gfs2_ok_to_move(nip, odip);
1651 if (error)
1652 goto out_gunlock_r;
1653 }
1654 }
1655
1656 num_gh = 1;
1657 gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs);
1658 if (odip != ndip) {
1659 gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC,
1660 ghs + num_gh);
1661 num_gh++;
1662 }
1663 gfs2_holder_init(oip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1664 num_gh++;
1665
1666 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
1667 num_gh++;
1668
1669 for (x = 0; x < num_gh; x++) {
1670 error = gfs2_glock_nq(ghs + x);
1671 if (error)
1672 goto out_gunlock;
1673 }
1674
1675 error = gfs2_glock_async_wait(num_gh, ghs);
1676 if (error)
1677 goto out_gunlock;
1678
1679 error = -ENOENT;
1680 if (oip->i_inode.i_nlink == 0 || nip->i_inode.i_nlink == 0)
1681 goto out_gunlock;
1682
1683 error = gfs2_unlink_ok(odip, &odentry->d_name, oip);
1684 if (error)
1685 goto out_gunlock;
1686 error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
1687 if (error)
1688 goto out_gunlock;
1689
1690 if (S_ISDIR(old_mode)) {
1691 error = gfs2_permission(&init_user_ns, odentry->d_inode,
1692 MAY_WRITE);
1693 if (error)
1694 goto out_gunlock;
1695 }
1696 if (S_ISDIR(new_mode)) {
1697 error = gfs2_permission(&init_user_ns, ndentry->d_inode,
1698 MAY_WRITE);
1699 if (error)
1700 goto out_gunlock;
1701 }
1702 error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 4 * RES_LEAF, 0);
1703 if (error)
1704 goto out_gunlock;
1705
1706 error = update_moved_ino(oip, ndip, S_ISDIR(old_mode));
1707 if (error)
1708 goto out_end_trans;
1709
1710 error = update_moved_ino(nip, odip, S_ISDIR(new_mode));
1711 if (error)
1712 goto out_end_trans;
1713
1714 error = gfs2_dir_mvino(ndip, &ndentry->d_name, oip,
1715 IF2DT(old_mode));
1716 if (error)
1717 goto out_end_trans;
1718
1719 error = gfs2_dir_mvino(odip, &odentry->d_name, nip,
1720 IF2DT(new_mode));
1721 if (error)
1722 goto out_end_trans;
1723
1724 if (odip != ndip) {
1725 if (S_ISDIR(new_mode) && !S_ISDIR(old_mode)) {
1726 inc_nlink(&odip->i_inode);
1727 drop_nlink(&ndip->i_inode);
1728 } else if (S_ISDIR(old_mode) && !S_ISDIR(new_mode)) {
1729 inc_nlink(&ndip->i_inode);
1730 drop_nlink(&odip->i_inode);
1731 }
1732 }
1733 mark_inode_dirty(&ndip->i_inode);
1734 if (odip != ndip)
1735 mark_inode_dirty(&odip->i_inode);
1736
1737 out_end_trans:
1738 gfs2_trans_end(sdp);
1739 out_gunlock:
1740 while (x--) {
1741 if (gfs2_holder_queued(ghs + x))
1742 gfs2_glock_dq(ghs + x);
1743 gfs2_holder_uninit(ghs + x);
1744 }
1745 out_gunlock_r:
1746 if (gfs2_holder_initialized(&r_gh))
1747 gfs2_glock_dq_uninit(&r_gh);
1748 out:
1749 return error;
1750 }
1751
1752 static int gfs2_rename2(struct user_namespace *mnt_userns, struct inode *odir,
1753 struct dentry *odentry, struct inode *ndir,
1754 struct dentry *ndentry, unsigned int flags)
1755 {
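	/*
	 * The VFS has already checked for an existing target when
	 * RENAME_NOREPLACE is set, so the flag can be ignored here.
	 */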
1756 flags &= ~RENAME_NOREPLACE;
1757
1758 if (flags & ~RENAME_EXCHANGE)
1759 return -EINVAL;
1760
1761 if (flags & RENAME_EXCHANGE)
1762 return gfs2_exchange(odir, odentry, ndir, ndentry, flags);
1763
1764 return gfs2_rename(odir, odentry, ndir, ndentry);
1765 }
1766
/**
 * gfs2_get_link - Follow a symbolic link
 * @dentry: The dentry of the link
 * @inode: The inode of the link
 * @done: destructor for the returned link target
 *
 * This can handle symlinks of any size.
 *
 * Returns: the link target, or an ERR_PTR() on error
 */
1778 static const char *gfs2_get_link(struct dentry *dentry,
1779 struct inode *inode,
1780 struct delayed_call *done)
1781 {
1782 struct gfs2_inode *ip = GFS2_I(inode);
1783 struct gfs2_holder i_gh;
1784 struct buffer_head *dibh;
1785 unsigned int size;
1786 char *buf;
1787 int error;
1788
1789 if (!dentry)
1790 return ERR_PTR(-ECHILD);
1791
1792 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1793 error = gfs2_glock_nq(&i_gh);
1794 if (error) {
1795 gfs2_holder_uninit(&i_gh);
1796 return ERR_PTR(error);
1797 }
1798
1799 size = (unsigned int)i_size_read(&ip->i_inode);
1800 if (size == 0) {
1801 gfs2_consist_inode(ip);
1802 buf = ERR_PTR(-EIO);
1803 goto out;
1804 }
1805
1806 error = gfs2_meta_inode_buffer(ip, &dibh);
1807 if (error) {
1808 buf = ERR_PTR(error);
1809 goto out;
1810 }
1811
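	/* Allocate one extra byte so the returned link target is NUL terminated. */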
1812 buf = kzalloc(size + 1, GFP_NOFS);
1813 if (!buf)
1814 buf = ERR_PTR(-ENOMEM);
1815 else
1816 memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size);
1817 brelse(dibh);
1818 out:
1819 gfs2_glock_dq_uninit(&i_gh);
1820 if (!IS_ERR(buf))
1821 set_delayed_call(done, kfree_link, buf);
1822 return buf;
1823 }
1824
/**
 * gfs2_permission - Check whether a caller may access an inode
 * @mnt_userns: User namespace of the mount the inode was found from
 * @inode: The inode
 * @mask: The mask to be tested
 *
 * This may be called from the VFS directly, or from within GFS2 with the
 * inode locked, so we look to see if the glock is already locked and only
 * lock the glock if it has not already been done.
 *
 * Returns: errno
 */
1838 int gfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
1839 int mask)
1840 {
1841 struct gfs2_inode *ip;
1842 struct gfs2_holder i_gh;
1843 int error;
1844
1845 gfs2_holder_mark_uninitialized(&i_gh);
1846 ip = GFS2_I(inode);
1847 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
1848 if (mask & MAY_NOT_BLOCK)
1849 return -ECHILD;
1850 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
1851 if (error)
1852 return error;
1853 }
1854
1855 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
1856 error = -EPERM;
1857 else
1858 error = generic_permission(&init_user_ns, inode, mask);
1859 if (gfs2_holder_initialized(&i_gh))
1860 gfs2_glock_dq_uninit(&i_gh);
1861
1862 return error;
1863 }
1864
1865 static int __gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
1866 {
1867 setattr_copy(&init_user_ns, inode, attr);
1868 mark_inode_dirty(inode);
1869 return 0;
1870 }
1871
1872 static int gfs2_setattr_simple(struct inode *inode, struct iattr *attr)
1873 {
1874 int error;
1875
1876 if (current->journal_info)
1877 return __gfs2_setattr_simple(inode, attr);
1878
1879 error = gfs2_trans_begin(GFS2_SB(inode), RES_DINODE, 0);
1880 if (error)
1881 return error;
1882
1883 error = __gfs2_setattr_simple(inode, attr);
1884 gfs2_trans_end(GFS2_SB(inode));
1885 return error;
1886 }
1887
1888 static int setattr_chown(struct inode *inode, struct iattr *attr)
1889 {
1890 struct gfs2_inode *ip = GFS2_I(inode);
1891 struct gfs2_sbd *sdp = GFS2_SB(inode);
1892 kuid_t ouid, nuid;
1893 kgid_t ogid, ngid;
1894 int error;
1895 struct gfs2_alloc_parms ap;
1896
1897 ouid = inode->i_uid;
1898 ogid = inode->i_gid;
1899 nuid = attr->ia_uid;
1900 ngid = attr->ia_gid;
1901
1902 if (!(attr->ia_valid & ATTR_UID) || uid_eq(ouid, nuid))
1903 ouid = nuid = NO_UID_QUOTA_CHANGE;
1904 if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
1905 ogid = ngid = NO_GID_QUOTA_CHANGE;
1906 error = gfs2_qa_get(ip);
1907 if (error)
1908 return error;
1909
1910 error = gfs2_rindex_update(sdp);
1911 if (error)
1912 goto out;
1913
1914 error = gfs2_quota_lock(ip, nuid, ngid);
1915 if (error)
1916 goto out;
1917
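	/*
	 * Transfer the inode's current block count from the old owner's
	 * quota to the new owner's.
	 */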
1918 ap.target = gfs2_get_inode_blocks(&ip->i_inode);
1919
1920 if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
1921 !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
1922 error = gfs2_quota_check(ip, nuid, ngid, &ap);
1923 if (error)
1924 goto out_gunlock_q;
1925 }
1926
1927 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_QUOTA, 0);
1928 if (error)
1929 goto out_gunlock_q;
1930
1931 error = gfs2_setattr_simple(inode, attr);
1932 if (error)
1933 goto out_end_trans;
1934
1935 if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) ||
1936 !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) {
1937 gfs2_quota_change(ip, -(s64)ap.target, ouid, ogid);
1938 gfs2_quota_change(ip, ap.target, nuid, ngid);
1939 }
1940
1941 out_end_trans:
1942 gfs2_trans_end(sdp);
1943 out_gunlock_q:
1944 gfs2_quota_unlock(ip);
1945 out:
1946 gfs2_qa_put(ip);
1947 return error;
1948 }
1949
/**
 * gfs2_setattr - Change attributes on an inode
 * @mnt_userns: User namespace of the mount the inode was found from
 * @dentry: The dentry which is changing
 * @attr: The structure describing the change
 *
 * The VFS layer wants to change one or more of an inode's attributes.  Write
 * that change out to disk.
 *
 * Returns: errno
 */
1962 static int gfs2_setattr(struct user_namespace *mnt_userns,
1963 struct dentry *dentry, struct iattr *attr)
1964 {
1965 struct inode *inode = d_inode(dentry);
1966 struct gfs2_inode *ip = GFS2_I(inode);
1967 struct gfs2_holder i_gh;
1968 int error;
1969
1970 error = gfs2_qa_get(ip);
1971 if (error)
1972 return error;
1973
1974 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1975 if (error)
1976 goto out;
1977
1978 error = may_setattr(&init_user_ns, inode, attr->ia_valid);
1979 if (error)
1980 goto error;
1981
1982 error = setattr_prepare(&init_user_ns, dentry, attr);
1983 if (error)
1984 goto error;
1985
1986 if (attr->ia_valid & ATTR_SIZE)
1987 error = gfs2_setattr_size(inode, attr->ia_size);
1988 else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
1989 error = setattr_chown(inode, attr);
1990 else {
1991 error = gfs2_setattr_simple(inode, attr);
1992 if (!error && attr->ia_valid & ATTR_MODE)
1993 error = posix_acl_chmod(&init_user_ns, inode,
1994 inode->i_mode);
1995 }
1996
1997 error:
1998 if (!error)
1999 mark_inode_dirty(inode);
2000 gfs2_glock_dq_uninit(&i_gh);
2001 out:
2002 gfs2_qa_put(ip);
2003 return error;
2004 }
2005
/**
 * gfs2_getattr - Read out an inode's attributes
 * @mnt_userns: User namespace of the mount the inode was found from
 * @path: Object to query
 * @stat: The inode's stats
 * @request_mask: Mask of STATX_xxx flags indicating the caller's interests
 * @flags: AT_STATX_xxx setting
 *
 * This may be called from the VFS directly, or from within GFS2 with the
 * inode locked, so we look to see if the glock is already locked and only
 * lock the glock if it has not already been done.  Note that it is the NFS
 * readdirplus operation which causes this to be called (from filldir)
 * with the glock already held.
 *
 * Returns: errno
 */
2023 static int gfs2_getattr(struct user_namespace *mnt_userns,
2024 const struct path *path, struct kstat *stat,
2025 u32 request_mask, unsigned int flags)
2026 {
2027 struct inode *inode = d_inode(path->dentry);
2028 struct gfs2_inode *ip = GFS2_I(inode);
2029 struct gfs2_holder gh;
2030 u32 gfsflags;
2031 int error;
2032
2033 gfs2_holder_mark_uninitialized(&gh);
2034 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
2035 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
2036 if (error)
2037 return error;
2038 }
2039
2040 gfsflags = ip->i_diskflags;
2041 if (gfsflags & GFS2_DIF_APPENDONLY)
2042 stat->attributes |= STATX_ATTR_APPEND;
2043 if (gfsflags & GFS2_DIF_IMMUTABLE)
2044 stat->attributes |= STATX_ATTR_IMMUTABLE;
2045
2046 stat->attributes_mask |= (STATX_ATTR_APPEND |
2047 STATX_ATTR_COMPRESSED |
2048 STATX_ATTR_ENCRYPTED |
2049 STATX_ATTR_IMMUTABLE |
2050 STATX_ATTR_NODUMP);
2051
2052 generic_fillattr(&init_user_ns, inode, stat);
2053
2054 if (gfs2_holder_initialized(&gh))
2055 gfs2_glock_dq_uninit(&gh);
2056
2057 return 0;
2058 }
2059
2060 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2061 u64 start, u64 len)
2062 {
2063 struct gfs2_inode *ip = GFS2_I(inode);
2064 struct gfs2_holder gh;
2065 int ret;
2066
2067 inode_lock_shared(inode);
2068
2069 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2070 if (ret)
2071 goto out;
2072
2073 ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
2074
2075 gfs2_glock_dq_uninit(&gh);
2076
2077 out:
2078 inode_unlock_shared(inode);
2079 return ret;
2080 }
2081
2082 loff_t gfs2_seek_data(struct file *file, loff_t offset)
2083 {
2084 struct inode *inode = file->f_mapping->host;
2085 struct gfs2_inode *ip = GFS2_I(inode);
2086 struct gfs2_holder gh;
2087 loff_t ret;
2088
2089 inode_lock_shared(inode);
2090 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2091 if (!ret)
2092 ret = iomap_seek_data(inode, offset, &gfs2_iomap_ops);
2093 gfs2_glock_dq_uninit(&gh);
2094 inode_unlock_shared(inode);
2095
2096 if (ret < 0)
2097 return ret;
2098 return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
2099 }
2100
2101 loff_t gfs2_seek_hole(struct file *file, loff_t offset)
2102 {
2103 struct inode *inode = file->f_mapping->host;
2104 struct gfs2_inode *ip = GFS2_I(inode);
2105 struct gfs2_holder gh;
2106 loff_t ret;
2107
2108 inode_lock_shared(inode);
2109 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
2110 if (!ret)
2111 ret = iomap_seek_hole(inode, offset, &gfs2_iomap_ops);
2112 gfs2_glock_dq_uninit(&gh);
2113 inode_unlock_shared(inode);
2114
2115 if (ret < 0)
2116 return ret;
2117 return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
2118 }
2119
2120 static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
2121 int flags)
2122 {
2123 struct gfs2_inode *ip = GFS2_I(inode);
2124 struct gfs2_glock *gl = ip->i_gl;
2125 struct gfs2_holder *gh;
2126 int error;
2127
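	/* If the glock is only held shared, re-acquire it exclusively before
	 * dirtying the timestamps. */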
2128 gh = gfs2_glock_is_locked_by_me(gl);
2129 if (gh && !gfs2_glock_is_held_excl(gl)) {
2130 gfs2_glock_dq(gh);
2131 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
2132 error = gfs2_glock_nq(gh);
2133 if (error)
2134 return error;
2135 }
2136 return generic_update_time(inode, time, flags);
2137 }
2138
2139 static const struct inode_operations gfs2_file_iops = {
2140 .permission = gfs2_permission,
2141 .setattr = gfs2_setattr,
2142 .getattr = gfs2_getattr,
2143 .listxattr = gfs2_listxattr,
2144 .fiemap = gfs2_fiemap,
2145 .get_acl = gfs2_get_acl,
2146 .set_acl = gfs2_set_acl,
2147 .update_time = gfs2_update_time,
2148 .fileattr_get = gfs2_fileattr_get,
2149 .fileattr_set = gfs2_fileattr_set,
2150 };
2151
2152 static const struct inode_operations gfs2_dir_iops = {
2153 .create = gfs2_create,
2154 .lookup = gfs2_lookup,
2155 .link = gfs2_link,
2156 .unlink = gfs2_unlink,
2157 .symlink = gfs2_symlink,
2158 .mkdir = gfs2_mkdir,
2159 .rmdir = gfs2_unlink,
2160 .mknod = gfs2_mknod,
2161 .rename = gfs2_rename2,
2162 .permission = gfs2_permission,
2163 .setattr = gfs2_setattr,
2164 .getattr = gfs2_getattr,
2165 .listxattr = gfs2_listxattr,
2166 .fiemap = gfs2_fiemap,
2167 .get_acl = gfs2_get_acl,
2168 .set_acl = gfs2_set_acl,
2169 .update_time = gfs2_update_time,
2170 .atomic_open = gfs2_atomic_open,
2171 .fileattr_get = gfs2_fileattr_get,
2172 .fileattr_set = gfs2_fileattr_set,
2173 };
2174
2175 static const struct inode_operations gfs2_symlink_iops = {
2176 .get_link = gfs2_get_link,
2177 .permission = gfs2_permission,
2178 .setattr = gfs2_setattr,
2179 .getattr = gfs2_getattr,
2180 .listxattr = gfs2_listxattr,
2181 .fiemap = gfs2_fiemap,
2182 };
2183