0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #include <linux/pagemap.h>
0015 #include <linux/mpage.h>
0016 #include <linux/sched.h>
0017 #include <linux/cred.h>
0018 #include <linux/uio.h>
0019 #include <linux/xattr.h>
0020 #include <linux/blkdev.h>
0021
0022 #include "hfs_fs.h"
0023 #include "btree.h"
0024
/* Forward declarations; full definitions are at the bottom of this file. */
static const struct file_operations hfs_file_operations;
static const struct inode_operations hfs_file_inode_operations;

/*
 * Mode bits HFS can actually represent: regular files, directories,
 * and the standard rwx permission bits for user/group/other.
 */
#define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO)
0031
0032 static int hfs_writepage(struct page *page, struct writeback_control *wbc)
0033 {
0034 return block_write_full_page(page, hfs_get_block, wbc);
0035 }
0036
0037 static int hfs_read_folio(struct file *file, struct folio *folio)
0038 {
0039 return block_read_full_folio(folio, hfs_get_block);
0040 }
0041
0042 static void hfs_write_failed(struct address_space *mapping, loff_t to)
0043 {
0044 struct inode *inode = mapping->host;
0045
0046 if (to > inode->i_size) {
0047 truncate_pagecache(inode, inode->i_size);
0048 hfs_file_truncate(inode);
0049 }
0050 }
0051
0052 int hfs_write_begin(struct file *file, struct address_space *mapping,
0053 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
0054 {
0055 int ret;
0056
0057 *pagep = NULL;
0058 ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
0059 hfs_get_block,
0060 &HFS_I(mapping->host)->phys_size);
0061 if (unlikely(ret))
0062 hfs_write_failed(mapping, pos + len);
0063
0064 return ret;
0065 }
0066
0067 static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
0068 {
0069 return generic_block_bmap(mapping, block, hfs_get_block);
0070 }
0071
/*
 * ->release_folio for the B-tree metadata inodes (extents/catalog).
 *
 * A folio backing B-tree nodes may only be released when no cached
 * hfs_bnode mapped by it is still referenced.  Depending on whether a
 * node spans whole pages or a page holds several nodes, check the one
 * node (or each node) covered by this folio; unreferenced nodes are
 * dropped from the node hash so the folio can go.
 */
static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i;
	bool res = true;

	/* Only the two B-tree metadata inodes use these aops. */
	switch (inode->i_ino) {
	case HFS_EXT_CNID:
		tree = HFS_SB(sb)->ext_tree;
		break;
	case HFS_CAT_CNID:
		tree = HFS_SB(sb)->cat_tree;
		break;
	default:
		BUG();
		return false;
	}

	if (!tree)
		return false;

	if (tree->node_size >= PAGE_SIZE) {
		/* One node covers one or more whole pages. */
		nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;	/* node not cached: nothing pins the folio */
		else if (atomic_read(&node->refcnt))
			res = false;	/* node still in use: keep folio */
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		/* Several nodes per page: check every node in this folio. */
		nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = false;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(folio) : false;
}
0129
/*
 * ->direct_IO: O_DIRECT read/write via blockdev_direct_IO() with
 * hfs_get_block() as the block mapper.
 */
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block);

	/*
	 * In case of error, an extending write may have instantiated a
	 * few blocks outside i_size.  Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			hfs_write_failed(mapping, end);
	}

	return ret;
}
0154
0155 static int hfs_writepages(struct address_space *mapping,
0156 struct writeback_control *wbc)
0157 {
0158 return mpage_writepages(mapping, wbc, hfs_get_block);
0159 }
0160
/*
 * Address-space operations for the B-tree metadata inodes (extents
 * and catalog trees); ->release_folio is the bnode-aware variant.
 */
const struct address_space_operations hfs_btree_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= hfs_read_folio,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.release_folio	= hfs_release_folio,
};
0171
/* Address-space operations for regular file data (and resource forks). */
const struct address_space_operations hfs_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= hfs_read_folio,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.direct_IO	= hfs_direct_IO,
	.writepages	= hfs_writepages,
};
0183
0184
0185
0186
/*
 * Allocate and initialize a new in-core inode for an object being
 * created in @dir with name @name and mode @mode.  Assigns the next
 * free CNID from the superblock, updates the volume's folder/file
 * counters, and marks the MDB dirty.  Returns NULL if inode
 * allocation fails.
 */
struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	spin_lock_init(&HFS_I(inode)->open_dir_lock);
	/* Remember the catalog key so the record can be found later. */
	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
	inode->i_ino = HFS_SB(sb)->next_id++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	HFS_I(inode)->fs_blocks = 0;
	if (S_ISDIR(mode)) {
		/* directory i_size counts entries incl. "." and ".." */
		inode->i_size = 2;
		HFS_SB(sb)->folder_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_dirs++;
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		/* world-rwx, then masked by the mount's dir umask */
		inode->i_mode |= S_IRWXUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
	} else if (S_ISREG(mode)) {
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
		HFS_SB(sb)->file_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_files++;
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		/* HFS has no POSIX modes; derive rwx from S_IWUSR only */
		inode->i_mode |= S_IRUGO|S_IXUGO;
		if (mode & S_IWUSR)
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
		HFS_I(inode)->phys_size = 0;
		HFS_I(inode)->alloc_blocks = 0;
		HFS_I(inode)->first_blocks = 0;
		HFS_I(inode)->cached_start = 0;
		HFS_I(inode)->cached_blocks = 0;
		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	}
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	/* volume counters changed: schedule an MDB writeback */
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);

	return inode;
}
0243
/*
 * Account for an object being deleted from the catalog: decrement the
 * volume's folder/file counters, and for an unlinked regular file
 * release its data blocks.  Marks the MDB dirty in all cases.
 */
void hfs_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
	if (S_ISDIR(inode->i_mode)) {
		HFS_SB(sb)->folder_count--;
		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
			HFS_SB(sb)->root_dirs--;
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
		return;
	}
	HFS_SB(sb)->file_count--;
	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
		HFS_SB(sb)->root_files--;
	if (S_ISREG(inode->i_mode)) {
		/* last link gone: free the file's allocation blocks */
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfs_file_truncate(inode);
		}
	}
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);
}
0269
/*
 * Populate the in-core fork state of @inode from on-disk catalog
 * fields.  @ext holds the fork's first three extents, @__log_size is
 * the big-endian logical EOF, @phys_size the big-endian physically
 * allocated size, and @clump_size the per-file clump size in bytes.
 */
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
			 __be32 __log_size, __be32 phys_size, u32 clump_size)
{
	struct super_block *sb = inode->i_sb;
	u32 log_size = be32_to_cpu(__log_size);
	u16 count;
	int i;

	/* an extent record always holds three extent descriptors */
	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
	for (count = 0, i = 0; i < 3; i++)
		count += be16_to_cpu(ext[i].count);
	HFS_I(inode)->first_blocks = count;

	inode->i_size = HFS_I(inode)->phys_size = log_size;
	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
			HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
	if (!HFS_I(inode)->clump_blocks)
		/* fall back to the volume-wide default clump size */
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}
0292
/*
 * Catalog key + record bundle passed through iget5_locked() to the
 * test/init callbacks.  @key may be NULL for a resource-fork inode
 * (see hfs_file_lookup()).
 */
struct hfs_iget_data {
	struct hfs_cat_key *key;
	hfs_cat_rec *rec;
};
0297
0298 static int hfs_test_inode(struct inode *inode, void *data)
0299 {
0300 struct hfs_iget_data *idata = data;
0301 hfs_cat_rec *rec;
0302
0303 rec = idata->rec;
0304 switch (rec->type) {
0305 case HFS_CDR_DIR:
0306 return inode->i_ino == be32_to_cpu(rec->dir.DirID);
0307 case HFS_CDR_FIL:
0308 return inode->i_ino == be32_to_cpu(rec->file.FlNum);
0309 default:
0310 BUG();
0311 return 1;
0312 }
0313 }
0314
0315
0316
0317
/*
 * iget5_locked() init callback: fill a freshly allocated inode from a
 * catalog file or directory record.  Ownership and permissions are
 * synthesized from the mount's uid/gid/umask options, since HFS does
 * not store POSIX modes.  Unknown record types mark the inode bad.
 * Always returns 0.
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	hfs_cat_rec *rec;

	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	spin_lock_init(&HFS_I(inode)->open_dir_lock);

	/* ownership comes from mount options, not from disk */
	inode->i_uid = hsb->s_uid;
	inode->i_gid = hsb->s_gid;
	set_nlink(inode, 1);

	/* a NULL key means this inode is a file's resource fork */
	if (idata->key)
		HFS_I(inode)->cat_key = *idata->key;
	else
		HFS_I(inode)->flags |= HFS_FLG_RSRC;
	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_FIL:
		/* pick the data fork or the resource fork fields */
		if (!HFS_IS_RSRC(inode)) {
			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
		} else {
			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
		}

		inode->i_ino = be32_to_cpu(rec->file.FlNum);
		inode->i_mode = S_IRUGO | S_IXUGO;
		/* the HFS "locked" flag maps to "no write permission" */
		if (!(rec->file.Flags & HFS_FIL_LOCK))
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~hsb->s_file_umask;
		inode->i_mode |= S_IFREG;
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->file.MdDat);
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		break;
	case HFS_CDR_DIR:
		inode->i_ino = be32_to_cpu(rec->dir.DirID);
		/* i_size is the valence plus "." and ".." */
		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
		HFS_I(inode)->fs_blocks = 0;
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->dir.MdDat);
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		break;
	default:
		make_bad_inode(inode);
	}
	return 0;
}
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389 struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
0390 {
0391 struct hfs_iget_data data = { key, rec };
0392 struct inode *inode;
0393 u32 cnid;
0394
0395 switch (rec->type) {
0396 case HFS_CDR_DIR:
0397 cnid = be32_to_cpu(rec->dir.DirID);
0398 break;
0399 case HFS_CDR_FIL:
0400 cnid = be32_to_cpu(rec->file.FlNum);
0401 break;
0402 default:
0403 return NULL;
0404 }
0405 inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
0406 if (inode && (inode->i_state & I_NEW))
0407 unlock_new_inode(inode);
0408 return inode;
0409 }
0410
0411 void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
0412 __be32 *log_size, __be32 *phys_size)
0413 {
0414 memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));
0415
0416 if (log_size)
0417 *log_size = cpu_to_be32(inode->i_size);
0418 if (phys_size)
0419 *phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
0420 HFS_SB(inode->i_sb)->alloc_blksz);
0421 }
0422
/*
 * ->write_inode: write an inode's metadata back to its catalog record.
 *
 * Dirty extents are flushed first.  The reserved CNIDs are special:
 * the extents and catalog tree inodes are written via hfs_btree_write()
 * and the root directory takes the normal catalog path below; any
 * other reserved CNID is a bug.  For a resource-fork inode the lookup
 * uses the main inode's key and only the resource fork fields are
 * rewritten.
 */
int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	int res;

	hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
	res = hfs_ext_write_extent(inode);
	if (res)
		return res;

	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
		switch (inode->i_ino) {
		case HFS_ROOT_CNID:
			break;
		case HFS_EXT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
			return 0;
		case HFS_CAT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
			return 0;
		default:
			BUG();
			return -EIO;
		}
	}

	if (HFS_IS_RSRC(inode))
		main_inode = HFS_I(inode)->rsrc_inode;

	/* nothing to update in the catalog for an unlinked file */
	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	fd.search_key->cat = HFS_I(main_inode)->cat_key;
	if (hfs_brec_find(&fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_dir));
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_dir));
		if (rec.type != HFS_CDR_DIR ||
		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
			/*
			 * NOTE(review): a record/inode mismatch is silently
			 * ignored here — confirm whether this should warn
			 * or bail out.
			 */
		}

		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
		/* Val (valence) excludes the implicit "." and ".." */
		rec.dir.Val = cpu_to_be16(inode->i_size - 2);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_dir));
	} else if (HFS_IS_RSRC(inode)) {
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		hfs_inode_write_fork(inode, rec.file.RExtRec,
				     &rec.file.RLgLen, &rec.file.RPyLen);
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
	} else {
		WARN_ON(fd.entrylength < sizeof(struct hfs_cat_file));
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		if (rec.type != HFS_CDR_FIL ||
		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
			/*
			 * NOTE(review): mismatch silently ignored — confirm
			 * whether this should warn or bail out.
			 */
		}

		/* mirror write permission into the HFS lock flag */
		if (inode->i_mode & S_IWUSR)
			rec.file.Flags &= ~HFS_FIL_LOCK;
		else
			rec.file.Flags |= HFS_FIL_LOCK;
		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}
0508
/*
 * ->lookup on a regular file: the only name that resolves inside a
 * file is the magic "rsrc" entry, which exposes the file's resource
 * fork as a separate, fake-hashed inode.  The two inodes point at
 * each other through rsrc_inode, and the resource inode holds a
 * reference (igrab) on the main one.
 */
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
				      unsigned int flags)
{
	struct inode *inode = NULL;
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	int res;

	/* only "rsrc" exists, and a resource fork has no forks itself */
	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	/* reuse a previously instantiated resource inode */
	inode = HFS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	fd.search_key->cat = HFS_I(dir)->cat_key;
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (!res) {
		/* NULL key makes hfs_read_inode() set HFS_FLG_RSRC */
		struct hfs_iget_data idata = { NULL, &rec };
		hfs_read_inode(inode, &idata);
	}
	hfs_find_exit(&fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	/* cross-link the two inodes; keep the main inode pinned */
	HFS_I(inode)->rsrc_inode = dir;
	HFS_I(dir)->rsrc_inode = inode;
	igrab(dir);
	inode_fake_hash(inode);
	mark_inode_dirty(inode);
	dont_mount(dentry);
out:
	return d_splice_alias(inode, dentry);
}
0553
0554 void hfs_evict_inode(struct inode *inode)
0555 {
0556 truncate_inode_pages_final(&inode->i_data);
0557 clear_inode(inode);
0558 if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
0559 HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
0560 iput(HFS_I(inode)->rsrc_inode);
0561 }
0562 }
0563
0564 static int hfs_file_open(struct inode *inode, struct file *file)
0565 {
0566 if (HFS_IS_RSRC(inode))
0567 inode = HFS_I(inode)->rsrc_inode;
0568 atomic_inc(&HFS_I(inode)->opencnt);
0569 return 0;
0570 }
0571
/*
 * ->release: drop the open count on the main inode; on the last close,
 * trim any over-allocated blocks back to the file's logical size.
 */
static int hfs_file_release(struct inode *inode, struct file *file)
{
	/* resource-fork closes are charged to the main inode */
	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
		inode_lock(inode);
		/* release clump-preallocated blocks past EOF */
		hfs_file_truncate(inode);
		inode_unlock(inode);
	}
	return 0;
}
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
/*
 * ->setattr for HFS inodes.
 *
 * HFS cannot store arbitrary ownership or modes, so uid/gid changes
 * away from the mount options and unsupported mode changes are
 * rejected (or silently ignored with the "quiet" mount option).
 * Mode changes on files are reduced to toggling the write bits;
 * size changes truncate or extend the file.
 */
int hfs_inode_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		      struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	int error;

	error = setattr_prepare(&init_user_ns, dentry,
				attr);	/* basic permission checks */
	if (error)
		return error;

	/* no uid/gid changes and no unsupported mode changes allowed */
	if (((attr->ia_valid & ATTR_UID) &&
	     (!uid_eq(attr->ia_uid, hsb->s_uid))) ||
	    ((attr->ia_valid & ATTR_GID) &&
	     (!gid_eq(attr->ia_gid, hsb->s_gid))) ||
	    ((attr->ia_valid & ATTR_MODE) &&
	     ((S_ISDIR(inode->i_mode) &&
	       (attr->ia_mode != inode->i_mode)) ||
	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
		/*
		 * NOTE(review): error is always 0 at this point (set by a
		 * successful setattr_prepare), so both branches return 0;
		 * the non-quiet case presumably meant -EPERM — confirm.
		 */
		return hsb->s_quiet ? 0 : error;
	}

	if (attr->ia_valid & ATTR_MODE) {
		/* only the write bits are meaningful; derive from S_IWUSR */
		if (attr->ia_mode & S_IWUSR)
			attr->ia_mode = inode->i_mode | S_IWUGO;
		else
			attr->ia_mode = inode->i_mode & ~S_IWUGO;
		attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask: ~hsb->s_file_umask;
	}

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		/* let in-flight O_DIRECT I/O finish before truncating */
		inode_dio_wait(inode);

		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;

		truncate_setsize(inode, attr->ia_size);
		hfs_file_truncate(inode);
		inode->i_atime = inode->i_mtime = inode->i_ctime =
						  current_time(inode);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
0658
/*
 * ->fsync: flush file data, then the inode's metadata, the MDB, and
 * finally the block device's dirty buffers.
 */
static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct super_block * sb;
	int ret, err;

	ret = file_write_and_wait_range(filp, start, end);
	if (ret)
		return ret;
	inode_lock(inode);

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	flush_delayed_work(&HFS_SB(sb)->mdb_work);
	/* .. finally sync the buffers to the disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}
0684
/* File operations for regular HFS files (and their resource forks). */
static const struct file_operations hfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfs_file_fsync,
	.open		= hfs_file_open,
	.release	= hfs_file_release,
};
0695
/* Inode operations for regular files; ->lookup serves the "rsrc" fork. */
static const struct inode_operations hfs_file_inode_operations = {
	.lookup		= hfs_file_lookup,
	.setattr	= hfs_inode_setattr,
	.listxattr	= generic_listxattr,
};