// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
0008 #include <linux/fs.h>
0009 #include <linux/f2fs_fs.h>
0010 #include <linux/stat.h>
0011 #include <linux/buffer_head.h>
0012 #include <linux/writeback.h>
0013 #include <linux/blkdev.h>
0014 #include <linux/falloc.h>
0015 #include <linux/types.h>
0016 #include <linux/compat.h>
0017 #include <linux/uaccess.h>
0018 #include <linux/mount.h>
0019 #include <linux/pagevec.h>
0020 #include <linux/uio.h>
0021 #include <linux/uuid.h>
0022 #include <linux/file.h>
0023 #include <linux/nls.h>
0024 #include <linux/sched/signal.h>
0025 #include <linux/fileattr.h>
0026 #include <linux/fadvise.h>
0027 #include <linux/iomap.h>
0028
0029 #include "f2fs.h"
0030 #include "node.h"
0031 #include "segment.h"
0032 #include "xattr.h"
0033 #include "acl.h"
0034 #include "gc.h"
0035 #include "iostat.h"
0036 #include <trace/events/f2fs.h>
0037 #include <uapi/linux/f2fs.h>
0038
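/*
 * Read fault handler for mmap()'ed f2fs files: let the generic page cache
 * fault path do the work and account the mapped read for iostat.
 */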
0039 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
0040 {
0041 struct inode *inode = file_inode(vmf->vma->vm_file);
0042 vm_fault_t ret;
0043
0044 ret = filemap_fault(vmf);
0045 if (!ret)
0046 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
0047 F2FS_BLKSIZE);
0048
0049 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
0050
0051 return ret;
0052 }
0053
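/*
 * Write fault (->page_mkwrite) handler: make sure the faulting page has an
 * on-disk block reserved (converting inline data first if needed), wait for
 * any in-flight writeback, zero the part beyond EOF and mark the page dirty.
 * Already-allocated compressed clusters skip the block allocation step.
 */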
0054 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
0055 {
0056 struct page *page = vmf->page;
0057 struct inode *inode = file_inode(vmf->vma->vm_file);
0058 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0059 struct dnode_of_data dn;
0060 bool need_alloc = true;
0061 int err = 0;
0062
0063 if (unlikely(IS_IMMUTABLE(inode)))
0064 return VM_FAULT_SIGBUS;
0065
0066 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
0067 return VM_FAULT_SIGBUS;
0068
0069 if (unlikely(f2fs_cp_error(sbi))) {
0070 err = -EIO;
0071 goto err;
0072 }
0073
0074 if (!f2fs_is_checkpoint_ready(sbi)) {
0075 err = -ENOSPC;
0076 goto err;
0077 }
0078
0079 err = f2fs_convert_inline_inode(inode);
0080 if (err)
0081 goto err;
0082
0083 #ifdef CONFIG_F2FS_FS_COMPRESSION
0084 if (f2fs_compressed_file(inode)) {
0085 int ret = f2fs_is_compressed_cluster(inode, page->index);
0086
0087 if (ret < 0) {
0088 err = ret;
0089 goto err;
0090 } else if (ret) {
0091 need_alloc = false;
0092 }
0093 }
0094 #endif
0095
0096 if (need_alloc)
0097 f2fs_balance_fs(sbi, true);
0098
0099 sb_start_pagefault(inode->i_sb);
0100
0101 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
0102
0103 file_update_time(vmf->vma->vm_file);
0104 filemap_invalidate_lock_shared(inode->i_mapping);
0105 lock_page(page);
0106 if (unlikely(page->mapping != inode->i_mapping ||
0107 page_offset(page) > i_size_read(inode) ||
0108 !PageUptodate(page))) {
0109 unlock_page(page);
0110 err = -EFAULT;
0111 goto out_sem;
0112 }
0113
0114 if (need_alloc) {
/* block allocation */
0116 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
0117 set_new_dnode(&dn, inode, NULL, NULL, 0);
0118 err = f2fs_get_block(&dn, page->index);
0119 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
0120 }
0121
0122 #ifdef CONFIG_F2FS_FS_COMPRESSION
0123 if (!need_alloc) {
0124 set_new_dnode(&dn, inode, NULL, NULL, 0);
0125 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
0126 f2fs_put_dnode(&dn);
0127 }
0128 #endif
0129 if (err) {
0130 unlock_page(page);
0131 goto out_sem;
0132 }
0133
0134 f2fs_wait_on_page_writeback(page, DATA, false, true);
0135
/* wait for GCed page writeback via META_MAPPING */
0137 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
0138
/*
 * check to see if the page is mapped already (no holes)
 */
0142 if (PageMappedToDisk(page))
0143 goto out_sem;
0144
/* page is wholly or partially inside EOF */
0146 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
0147 i_size_read(inode)) {
0148 loff_t offset;
0149
0150 offset = i_size_read(inode) & ~PAGE_MASK;
0151 zero_user_segment(page, offset, PAGE_SIZE);
0152 }
0153 set_page_dirty(page);
0154 if (!PageUptodate(page))
0155 SetPageUptodate(page);
0156
0157 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
0158 f2fs_update_time(sbi, REQ_TIME);
0159
0160 trace_f2fs_vm_page_mkwrite(page, DATA);
0161 out_sem:
0162 filemap_invalidate_unlock_shared(inode->i_mapping);
0163
0164 sb_end_pagefault(inode->i_sb);
0165 err:
0166 return block_page_mkwrite_return(err);
0167 }
0168
0169 static const struct vm_operations_struct f2fs_file_vm_ops = {
0170 .fault = f2fs_filemap_fault,
0171 .map_pages = filemap_map_pages,
0172 .page_mkwrite = f2fs_vm_page_mkwrite,
0173 };
0174
0175 static int get_parent_ino(struct inode *inode, nid_t *pino)
0176 {
0177 struct dentry *dentry;
0178
/*
 * Make sure to get the non-deleted alias.  The alias associated with
 * the open file descriptor being fsync()'ed may be deleted already.
 */
0183 dentry = d_find_alias(inode);
0184 if (!dentry)
0185 return 0;
0186
0187 *pino = parent_ino(dentry);
0188 dput(dentry);
0189 return 1;
0190 }
0191
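/*
 * Decide whether this fsync() can rely on roll-forward recovery (flushing
 * only the node chain) or whether a full checkpoint is required, and return
 * the reason for tracing.
 */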
0192 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
0193 {
0194 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0195 enum cp_reason_type cp_reason = CP_NO_NEEDED;
0196
0197 if (!S_ISREG(inode->i_mode))
0198 cp_reason = CP_NON_REGULAR;
0199 else if (f2fs_compressed_file(inode))
0200 cp_reason = CP_COMPRESSED;
0201 else if (inode->i_nlink != 1)
0202 cp_reason = CP_HARDLINK;
0203 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
0204 cp_reason = CP_SB_NEED_CP;
0205 else if (file_wrong_pino(inode))
0206 cp_reason = CP_WRONG_PINO;
0207 else if (!f2fs_space_for_roll_forward(sbi))
0208 cp_reason = CP_NO_SPC_ROLL;
0209 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
0210 cp_reason = CP_NODE_NEED_CP;
0211 else if (test_opt(sbi, FASTBOOT))
0212 cp_reason = CP_FASTBOOT_MODE;
0213 else if (F2FS_OPTION(sbi).active_logs == 2)
0214 cp_reason = CP_SPEC_LOG_NUM;
0215 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
0216 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
0217 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
0218 TRANS_DIR_INO))
0219 cp_reason = CP_RECOVER_DIR;
0220
0221 return cp_reason;
0222 }
0223
0224 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
0225 {
0226 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
0227 bool ret = false;
0228
0229 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
0230 ret = true;
0231 f2fs_put_page(i, 0);
0232 return ret;
0233 }
0234
0235 static void try_to_fix_pino(struct inode *inode)
0236 {
0237 struct f2fs_inode_info *fi = F2FS_I(inode);
0238 nid_t pino;
0239
0240 f2fs_down_write(&fi->i_sem);
0241 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
0242 get_parent_ino(inode, &pino)) {
0243 f2fs_i_pino_write(inode, pino);
0244 file_got_pino(inode);
0245 }
0246 f2fs_up_write(&fi->i_sem);
0247 }
0248
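/*
 * Core of fsync()/fdatasync(): write back dirty data, then either trigger a
 * checkpoint (when need_do_checkpoint() says so) or persist the node chain
 * for roll-forward recovery, optionally followed by a device flush depending
 * on the fsync_mode mount option.
 */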
0249 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
0250 int datasync, bool atomic)
0251 {
0252 struct inode *inode = file->f_mapping->host;
0253 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0254 nid_t ino = inode->i_ino;
0255 int ret = 0;
0256 enum cp_reason_type cp_reason = 0;
0257 struct writeback_control wbc = {
0258 .sync_mode = WB_SYNC_ALL,
0259 .nr_to_write = LONG_MAX,
0260 .for_reclaim = 0,
0261 };
0262 unsigned int seq_id = 0;
0263
0264 if (unlikely(f2fs_readonly(inode->i_sb)))
0265 return 0;
0266
0267 trace_f2fs_sync_file_enter(inode);
0268
0269 if (S_ISDIR(inode->i_mode))
0270 goto go_write;
0271
/* if fdatasync is triggered, let's do in-place-update */
0273 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
0274 set_inode_flag(inode, FI_NEED_IPU);
0275 ret = file_write_and_wait_range(file, start, end);
0276 clear_inode_flag(inode, FI_NEED_IPU);
0277
0278 if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
0279 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
0280 return ret;
0281 }
0282
/* if the inode is dirty, let's recover all the time */
0284 if (!f2fs_skip_inode_update(inode, datasync)) {
0285 f2fs_write_inode(inode, NULL);
0286 goto go_write;
0287 }
0288
/*
 * if there is no written data, don't waste time writing recovery info
 */
0292 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
0293 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
0294
/* it may call write_inode just prior to fsync */
0296 if (need_inode_page_update(sbi, ino))
0297 goto go_write;
0298
0299 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
0300 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
0301 goto flush_out;
0302 goto out;
0303 } else {
/*
 * For the OPU case, during fsync() a node block can be persisted before
 * its data when the lower device doesn't support write barriers, which
 * can corrupt data after a sudden power-off. So for strict fsync mode,
 * force atomic write semantics to keep the write order between data,
 * node and the last node block.
 */
0312 if (F2FS_OPTION(sbi).fsync_mode ==
0313 FSYNC_MODE_STRICT && !atomic)
0314 atomic = true;
0315 }
0316 go_write:
/*
 * Both fdatasync() and fsync() are able to be recovered from a
 * sudden power-off.
 */
0321 f2fs_down_read(&F2FS_I(inode)->i_sem);
0322 cp_reason = need_do_checkpoint(inode);
0323 f2fs_up_read(&F2FS_I(inode)->i_sem);
0324
0325 if (cp_reason) {
/* all the dirty node pages should be flushed for POR */
0327 ret = f2fs_sync_fs(inode->i_sb, 1);
0328
/*
 * We've secured consistency through sync_fs. The following pino
 * will be used only for fsynced inodes after checkpoint.
 */
0333 try_to_fix_pino(inode);
0334 clear_inode_flag(inode, FI_APPEND_WRITE);
0335 clear_inode_flag(inode, FI_UPDATE_WRITE);
0336 goto out;
0337 }
0338 sync_nodes:
0339 atomic_inc(&sbi->wb_sync_req[NODE]);
0340 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
0341 atomic_dec(&sbi->wb_sync_req[NODE]);
0342 if (ret)
0343 goto out;
0344
/* if cp_error was enabled, we should avoid infinite loop */
0346 if (unlikely(f2fs_cp_error(sbi))) {
0347 ret = -EIO;
0348 goto out;
0349 }
0350
0351 if (f2fs_need_inode_block_update(sbi, ino)) {
0352 f2fs_mark_inode_dirty_sync(inode, true);
0353 f2fs_write_inode(inode, NULL);
0354 goto sync_nodes;
0355 }

/*
 * If it's an atomic write, it's fine to keep the write ordering: we don't
 * need to wait for node write completion here, since the node chain
 * serializes node blocks, and a reordered node write simply shows up as a
 * broken chain that recovery stops at.
 */
0365 if (!atomic) {
0366 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
0367 if (ret)
0368 goto out;
0369 }
0370
/* once recovery info is written, don't need to track this */
0372 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
0373 clear_inode_flag(inode, FI_APPEND_WRITE);
0374 flush_out:
0375 if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
0376 (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
0377 ret = f2fs_issue_flush(sbi, inode->i_ino);
0378 if (!ret) {
0379 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
0380 clear_inode_flag(inode, FI_UPDATE_WRITE);
0381 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
0382 }
0383 f2fs_update_time(sbi, REQ_TIME);
0384 out:
0385 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
0386 return ret;
0387 }
0388
0389 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
0390 {
0391 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
0392 return -EIO;
0393 return f2fs_do_sync_file(file, start, end, datasync, false);
0394 }
0395
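/*
 * Helper for SEEK_DATA/SEEK_HOLE: a valid block address (or a dirty page
 * over a NEW_ADDR block) counts as data, a NULL_ADDR block counts as a hole.
 */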
0396 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
0397 pgoff_t index, int whence)
0398 {
0399 switch (whence) {
0400 case SEEK_DATA:
0401 if (__is_valid_data_blkaddr(blkaddr))
0402 return true;
0403 if (blkaddr == NEW_ADDR &&
0404 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
0405 return true;
0406 break;
0407 case SEEK_HOLE:
0408 if (blkaddr == NULL_ADDR)
0409 return true;
0410 break;
0411 }
0412 return false;
0413 }
0414
0415 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
0416 {
0417 struct inode *inode = file->f_mapping->host;
0418 loff_t maxbytes = inode->i_sb->s_maxbytes;
0419 struct dnode_of_data dn;
0420 pgoff_t pgofs, end_offset;
0421 loff_t data_ofs = offset;
0422 loff_t isize;
0423 int err = 0;
0424
0425 inode_lock(inode);
0426
0427 isize = i_size_read(inode);
0428 if (offset >= isize)
0429 goto fail;
0430
/* handle inline data case */
0432 if (f2fs_has_inline_data(inode)) {
0433 if (whence == SEEK_HOLE) {
0434 data_ofs = isize;
0435 goto found;
0436 } else if (whence == SEEK_DATA) {
0437 data_ofs = offset;
0438 goto found;
0439 }
0440 }
0441
0442 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
0443
0444 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
0445 set_new_dnode(&dn, inode, NULL, NULL, 0);
0446 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
0447 if (err && err != -ENOENT) {
0448 goto fail;
0449 } else if (err == -ENOENT) {
/* direct node does not exist */
0451 if (whence == SEEK_DATA) {
0452 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
0453 continue;
0454 } else {
0455 goto found;
0456 }
0457 }
0458
0459 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
0460
/* find data/hole in dnode block */
0462 for (; dn.ofs_in_node < end_offset;
0463 dn.ofs_in_node++, pgofs++,
0464 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
0465 block_t blkaddr;
0466
0467 blkaddr = f2fs_data_blkaddr(&dn);
0468
0469 if (__is_valid_data_blkaddr(blkaddr) &&
0470 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
0471 blkaddr, DATA_GENERIC_ENHANCE)) {
0472 f2fs_put_dnode(&dn);
0473 goto fail;
0474 }
0475
0476 if (__found_offset(file->f_mapping, blkaddr,
0477 pgofs, whence)) {
0478 f2fs_put_dnode(&dn);
0479 goto found;
0480 }
0481 }
0482 f2fs_put_dnode(&dn);
0483 }
0484
0485 if (whence == SEEK_DATA)
0486 goto fail;
0487 found:
0488 if (whence == SEEK_HOLE && data_ofs > isize)
0489 data_ofs = isize;
0490 inode_unlock(inode);
0491 return vfs_setpos(file, data_ofs, maxbytes);
0492 fail:
0493 inode_unlock(inode);
0494 return -ENXIO;
0495 }
0496
0497 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
0498 {
0499 struct inode *inode = file->f_mapping->host;
0500 loff_t maxbytes = inode->i_sb->s_maxbytes;
0501
0502 if (f2fs_compressed_file(inode))
0503 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
0504
0505 switch (whence) {
0506 case SEEK_SET:
0507 case SEEK_CUR:
0508 case SEEK_END:
0509 return generic_file_llseek_size(file, offset, whence,
0510 maxbytes, i_size_read(inode));
0511 case SEEK_DATA:
0512 case SEEK_HOLE:
0513 if (offset < 0)
0514 return -ENXIO;
0515 return f2fs_seek_block(file, offset, whence);
0516 }
0517
0518 return -EINVAL;
0519 }
0520
0521 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
0522 {
0523 struct inode *inode = file_inode(file);
0524
0525 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
0526 return -EIO;
0527
0528 if (!f2fs_is_compress_backend_ready(inode))
0529 return -EOPNOTSUPP;
0530
0531 file_accessed(file);
0532 vma->vm_ops = &f2fs_file_vm_ops;
0533 set_inode_flag(inode, FI_MMAP_FILE);
0534 return 0;
0535 }
0536
0537 static int f2fs_file_open(struct inode *inode, struct file *filp)
0538 {
0539 int err = fscrypt_file_open(inode, filp);
0540
0541 if (err)
0542 return err;
0543
0544 if (!f2fs_is_compress_backend_ready(inode))
0545 return -EOPNOTSUPP;
0546
0547 err = fsverity_file_open(inode, filp);
0548 if (err)
0549 return err;
0550
0551 filp->f_mode |= FMODE_NOWAIT;
0552
0553 return dquot_file_open(inode, filp);
0554 }
0555
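/*
 * Invalidate @count block addresses in the dnode starting at dn->ofs_in_node,
 * updating compressed-cluster accounting, the extent cache and the inode's
 * valid block count.
 */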
0556 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
0557 {
0558 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
0559 struct f2fs_node *raw_node;
0560 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
0561 __le32 *addr;
0562 int base = 0;
0563 bool compressed_cluster = false;
0564 int cluster_index = 0, valid_blocks = 0;
0565 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
0566 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
0567
0568 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
0569 base = get_extra_isize(dn->inode);
0570
0571 raw_node = F2FS_NODE(dn->node_page);
0572 addr = blkaddr_in_node(raw_node) + base + ofs;
0573
/* Assumption: truncation starts at a cluster boundary */
0575 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
0576 block_t blkaddr = le32_to_cpu(*addr);
0577
0578 if (f2fs_compressed_file(dn->inode) &&
0579 !(cluster_index & (cluster_size - 1))) {
0580 if (compressed_cluster)
0581 f2fs_i_compr_blocks_update(dn->inode,
0582 valid_blocks, false);
0583 compressed_cluster = (blkaddr == COMPRESS_ADDR);
0584 valid_blocks = 0;
0585 }
0586
0587 if (blkaddr == NULL_ADDR)
0588 continue;
0589
0590 dn->data_blkaddr = NULL_ADDR;
0591 f2fs_set_data_blkaddr(dn);
0592
0593 if (__is_valid_data_blkaddr(blkaddr)) {
0594 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
0595 DATA_GENERIC_ENHANCE))
0596 continue;
0597 if (compressed_cluster)
0598 valid_blocks++;
0599 }
0600
0601 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
0602 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
0603
0604 f2fs_invalidate_blocks(sbi, blkaddr);
0605
0606 if (!released || blkaddr != COMPRESS_ADDR)
0607 nr_free++;
0608 }
0609
0610 if (compressed_cluster)
0611 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
0612
0613 if (nr_free) {
0614 pgoff_t fofs;
/*
 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
 * we will invalidate all blkaddrs in the whole range.
 */
0619 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
0620 dn->inode) + ofs;
0621 f2fs_update_extent_cache_range(dn, fofs, 0, len);
0622 dec_valid_block_count(sbi, dn->inode, nr_free);
0623 }
0624 dn->ofs_in_node = ofs;
0625
0626 f2fs_update_time(sbi, REQ_TIME);
0627 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
0628 dn->ofs_in_node, nr_free);
0629 }
0630
0631 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
0632 {
0633 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
0634 }
0635
0636 static int truncate_partial_data_page(struct inode *inode, u64 from,
0637 bool cache_only)
0638 {
0639 loff_t offset = from & (PAGE_SIZE - 1);
0640 pgoff_t index = from >> PAGE_SHIFT;
0641 struct address_space *mapping = inode->i_mapping;
0642 struct page *page;
0643
0644 if (!offset && !cache_only)
0645 return 0;
0646
0647 if (cache_only) {
0648 page = find_lock_page(mapping, index);
0649 if (page && PageUptodate(page))
0650 goto truncate_out;
0651 f2fs_put_page(page, 1);
0652 return 0;
0653 }
0654
0655 page = f2fs_get_lock_data_page(inode, index, true);
0656 if (IS_ERR(page))
0657 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
0658 truncate_out:
0659 f2fs_wait_on_page_writeback(page, DATA, true, true);
0660 zero_user(page, offset, PAGE_SIZE - offset);
0661
/* An encrypted inode should have a key and truncation must succeed. */
0663 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
0664 if (!cache_only)
0665 set_page_dirty(page);
0666 f2fs_put_page(page, 1);
0667 return 0;
0668 }
0669
0670 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
0671 {
0672 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0673 struct dnode_of_data dn;
0674 pgoff_t free_from;
0675 int count = 0, err = 0;
0676 struct page *ipage;
0677 bool truncate_page = false;
0678
0679 trace_f2fs_truncate_blocks_enter(inode, from);
0680
0681 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
0682
0683 if (free_from >= max_file_blocks(inode))
0684 goto free_partial;
0685
0686 if (lock)
0687 f2fs_lock_op(sbi);
0688
0689 ipage = f2fs_get_node_page(sbi, inode->i_ino);
0690 if (IS_ERR(ipage)) {
0691 err = PTR_ERR(ipage);
0692 goto out;
0693 }
0694
0695 if (f2fs_has_inline_data(inode)) {
0696 f2fs_truncate_inline_inode(inode, ipage, from);
0697 f2fs_put_page(ipage, 1);
0698 truncate_page = true;
0699 goto out;
0700 }
0701
0702 set_new_dnode(&dn, inode, ipage, NULL, 0);
0703 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
0704 if (err) {
0705 if (err == -ENOENT)
0706 goto free_next;
0707 goto out;
0708 }
0709
0710 count = ADDRS_PER_PAGE(dn.node_page, inode);
0711
0712 count -= dn.ofs_in_node;
0713 f2fs_bug_on(sbi, count < 0);
0714
0715 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
0716 f2fs_truncate_data_blocks_range(&dn, count);
0717 free_from += count;
0718 }
0719
0720 f2fs_put_dnode(&dn);
0721 free_next:
0722 err = f2fs_truncate_inode_blocks(inode, free_from);
0723 out:
0724 if (lock)
0725 f2fs_unlock_op(sbi);
0726 free_partial:
/* lastly zero out the first data page */
0728 if (!err)
0729 err = truncate_partial_data_page(inode, from, truncate_page);
0730
0731 trace_f2fs_truncate_blocks_exit(inode, err);
0732 return err;
0733 }
0734
0735 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
0736 {
0737 u64 free_from = from;
0738 int err;
0739
0740 #ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * For compressed files only cluster-size aligned truncation is
 * supported, so round the free point up to a cluster boundary.
 */
0745 if (f2fs_compressed_file(inode))
0746 free_from = round_up(from,
0747 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
0748 #endif
0749
0750 err = f2fs_do_truncate_blocks(inode, free_from, lock);
0751 if (err)
0752 return err;
0753
0754 #ifdef CONFIG_F2FS_FS_COMPRESSION
/*
 * After compressed blocks have been released, writes are normally not
 * allowed; truncating the file to zero makes it writable again, so
 * clear the released flag here.
 */
0759 if (f2fs_compressed_file(inode) && !free_from
0760 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
0761 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
0762
0763 if (from != free_from) {
0764 err = f2fs_truncate_partial_cluster(inode, from, lock);
0765 if (err)
0766 return err;
0767 }
0768 #endif
0769
0770 return 0;
0771 }
0772
0773 int f2fs_truncate(struct inode *inode)
0774 {
0775 int err;
0776
0777 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
0778 return -EIO;
0779
0780 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
0781 S_ISLNK(inode->i_mode)))
0782 return 0;
0783
0784 trace_f2fs_truncate(inode);
0785
0786 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
0787 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
0788 return -EIO;
0789 }
0790
0791 err = f2fs_dquot_initialize(inode);
0792 if (err)
0793 return err;
0794
/* we should check inline_data size */
0796 if (!f2fs_may_inline_data(inode)) {
0797 err = f2fs_convert_inline_inode(inode);
0798 if (err)
0799 return err;
0800 }
0801
0802 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
0803 if (err)
0804 return err;
0805
0806 inode->i_mtime = inode->i_ctime = current_time(inode);
0807 f2fs_mark_inode_dirty_sync(inode, false);
0808 return 0;
0809 }
0810
0811 int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
0812 struct kstat *stat, u32 request_mask, unsigned int query_flags)
0813 {
0814 struct inode *inode = d_inode(path->dentry);
0815 struct f2fs_inode_info *fi = F2FS_I(inode);
0816 struct f2fs_inode *ri = NULL;
0817 unsigned int flags;
0818
0819 if (f2fs_has_extra_attr(inode) &&
0820 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
0821 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
0822 stat->result_mask |= STATX_BTIME;
0823 stat->btime.tv_sec = fi->i_crtime.tv_sec;
0824 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
0825 }
0826
0827 flags = fi->i_flags;
0828 if (flags & F2FS_COMPR_FL)
0829 stat->attributes |= STATX_ATTR_COMPRESSED;
0830 if (flags & F2FS_APPEND_FL)
0831 stat->attributes |= STATX_ATTR_APPEND;
0832 if (IS_ENCRYPTED(inode))
0833 stat->attributes |= STATX_ATTR_ENCRYPTED;
0834 if (flags & F2FS_IMMUTABLE_FL)
0835 stat->attributes |= STATX_ATTR_IMMUTABLE;
0836 if (flags & F2FS_NODUMP_FL)
0837 stat->attributes |= STATX_ATTR_NODUMP;
0838 if (IS_VERITY(inode))
0839 stat->attributes |= STATX_ATTR_VERITY;
0840
0841 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
0842 STATX_ATTR_APPEND |
0843 STATX_ATTR_ENCRYPTED |
0844 STATX_ATTR_IMMUTABLE |
0845 STATX_ATTR_NODUMP |
0846 STATX_ATTR_VERITY);
0847
0848 generic_fillattr(mnt_userns, inode, stat);
0849
/* we need to show initial sectors used for inline_data/dentries */
0851 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
0852 f2fs_has_inline_dentry(inode))
0853 stat->blocks += (stat->size + 511) >> 9;
0854
0855 return 0;
0856 }
0857
0858 #ifdef CONFIG_F2FS_FS_POSIX_ACL
0859 static void __setattr_copy(struct user_namespace *mnt_userns,
0860 struct inode *inode, const struct iattr *attr)
0861 {
0862 unsigned int ia_valid = attr->ia_valid;
0863
0864 i_uid_update(mnt_userns, attr, inode);
0865 i_gid_update(mnt_userns, attr, inode);
0866 if (ia_valid & ATTR_ATIME)
0867 inode->i_atime = attr->ia_atime;
0868 if (ia_valid & ATTR_MTIME)
0869 inode->i_mtime = attr->ia_mtime;
0870 if (ia_valid & ATTR_CTIME)
0871 inode->i_ctime = attr->ia_ctime;
0872 if (ia_valid & ATTR_MODE) {
0873 umode_t mode = attr->ia_mode;
0874 kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
0875
0876 if (!in_group_p(kgid) && !capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
0877 mode &= ~S_ISGID;
0878 set_acl_inode(inode, mode);
0879 }
0880 }
0881 #else
0882 #define __setattr_copy setattr_copy
0883 #endif
0884
0885 int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
0886 struct iattr *attr)
0887 {
0888 struct inode *inode = d_inode(dentry);
0889 int err;
0890
0891 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
0892 return -EIO;
0893
0894 if (unlikely(IS_IMMUTABLE(inode)))
0895 return -EPERM;
0896
0897 if (unlikely(IS_APPEND(inode) &&
0898 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
0899 ATTR_GID | ATTR_TIMES_SET))))
0900 return -EPERM;
0901
0902 if ((attr->ia_valid & ATTR_SIZE) &&
0903 !f2fs_is_compress_backend_ready(inode))
0904 return -EOPNOTSUPP;
0905
0906 err = setattr_prepare(mnt_userns, dentry, attr);
0907 if (err)
0908 return err;
0909
0910 err = fscrypt_prepare_setattr(dentry, attr);
0911 if (err)
0912 return err;
0913
0914 err = fsverity_prepare_setattr(dentry, attr);
0915 if (err)
0916 return err;
0917
0918 if (is_quota_modification(mnt_userns, inode, attr)) {
0919 err = f2fs_dquot_initialize(inode);
0920 if (err)
0921 return err;
0922 }
0923 if (i_uid_needs_update(mnt_userns, attr, inode) ||
0924 i_gid_needs_update(mnt_userns, attr, inode)) {
0925 f2fs_lock_op(F2FS_I_SB(inode));
0926 err = dquot_transfer(mnt_userns, inode, attr);
0927 if (err) {
0928 set_sbi_flag(F2FS_I_SB(inode),
0929 SBI_QUOTA_NEED_REPAIR);
0930 f2fs_unlock_op(F2FS_I_SB(inode));
0931 return err;
0932 }
/*
 * update uid/gid under lock_op(), so that dquot and inode can
 * be updated atomically.
 */
0937 i_uid_update(mnt_userns, attr, inode);
0938 i_gid_update(mnt_userns, attr, inode);
0939 f2fs_mark_inode_dirty_sync(inode, true);
0940 f2fs_unlock_op(F2FS_I_SB(inode));
0941 }
0942
0943 if (attr->ia_valid & ATTR_SIZE) {
0944 loff_t old_size = i_size_read(inode);
0945
0946 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
/*
 * Convert the inline inode before i_size_write() so that the size
 * stays consistent with the inline_data limit and the inline flag.
 */
0951 err = f2fs_convert_inline_inode(inode);
0952 if (err)
0953 return err;
0954 }
0955
0956 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
0957 filemap_invalidate_lock(inode->i_mapping);
0958
0959 truncate_setsize(inode, attr->ia_size);
0960
0961 if (attr->ia_size <= old_size)
0962 err = f2fs_truncate(inode);
/*
 * Do not trim blocks beyond i_size if the target size is larger
 * than i_size.
 */
0967 filemap_invalidate_unlock(inode->i_mapping);
0968 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
0969 if (err)
0970 return err;
0971
0972 spin_lock(&F2FS_I(inode)->i_size_lock);
0973 inode->i_mtime = inode->i_ctime = current_time(inode);
0974 F2FS_I(inode)->last_disk_size = i_size_read(inode);
0975 spin_unlock(&F2FS_I(inode)->i_size_lock);
0976 }
0977
0978 __setattr_copy(mnt_userns, inode, attr);
0979
0980 if (attr->ia_valid & ATTR_MODE) {
0981 err = posix_acl_chmod(mnt_userns, inode, f2fs_get_inode_mode(inode));
0982
0983 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
0984 if (!err)
0985 inode->i_mode = F2FS_I(inode)->i_acl_mode;
0986 clear_inode_flag(inode, FI_ACL_MODE);
0987 }
0988 }
0989
/* file size may have changed here */
0991 f2fs_mark_inode_dirty_sync(inode, true);
0992
/* inode changes will produce dirty node pages flushed by checkpoint */
0994 f2fs_balance_fs(F2FS_I_SB(inode), true);
0995
0996 return err;
0997 }
0998
0999 const struct inode_operations f2fs_file_inode_operations = {
1000 .getattr = f2fs_getattr,
1001 .setattr = f2fs_setattr,
1002 .get_acl = f2fs_get_acl,
1003 .set_acl = f2fs_set_acl,
1004 .listxattr = f2fs_listxattr,
1005 .fiemap = f2fs_fiemap,
1006 .fileattr_get = f2fs_fileattr_get,
1007 .fileattr_set = f2fs_fileattr_set,
1008 };
1009
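/*
 * Zero a sub-page range [start, start + len) of the page at @index by
 * grabbing (or allocating) the data page and dirtying it.
 */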
1010 static int fill_zero(struct inode *inode, pgoff_t index,
1011 loff_t start, loff_t len)
1012 {
1013 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1014 struct page *page;
1015
1016 if (!len)
1017 return 0;
1018
1019 f2fs_balance_fs(sbi, true);
1020
1021 f2fs_lock_op(sbi);
1022 page = f2fs_get_new_data_page(inode, NULL, index, false);
1023 f2fs_unlock_op(sbi);
1024
1025 if (IS_ERR(page))
1026 return PTR_ERR(page);
1027
1028 f2fs_wait_on_page_writeback(page, DATA, true, true);
1029 zero_user(page, start, len);
1030 set_page_dirty(page);
1031 f2fs_put_page(page, 1);
1032 return 0;
1033 }
1034
1035 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1036 {
1037 int err;
1038
1039 while (pg_start < pg_end) {
1040 struct dnode_of_data dn;
1041 pgoff_t end_offset, count;
1042
1043 set_new_dnode(&dn, inode, NULL, NULL, 0);
1044 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1045 if (err) {
1046 if (err == -ENOENT) {
1047 pg_start = f2fs_get_next_page_offset(&dn,
1048 pg_start);
1049 continue;
1050 }
1051 return err;
1052 }
1053
1054 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1055 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1056
1057 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1058
1059 f2fs_truncate_data_blocks_range(&dn, count);
1060 f2fs_put_dnode(&dn);
1061
1062 pg_start += count;
1063 }
1064 return 0;
1065 }
1066
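/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends and drop whole
 * blocks in between while holding i_gc_rwsem and the invalidate lock.
 */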
1067 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1068 {
1069 pgoff_t pg_start, pg_end;
1070 loff_t off_start, off_end;
1071 int ret;
1072
1073 ret = f2fs_convert_inline_inode(inode);
1074 if (ret)
1075 return ret;
1076
1077 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1078 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1079
1080 off_start = offset & (PAGE_SIZE - 1);
1081 off_end = (offset + len) & (PAGE_SIZE - 1);
1082
1083 if (pg_start == pg_end) {
1084 ret = fill_zero(inode, pg_start, off_start,
1085 off_end - off_start);
1086 if (ret)
1087 return ret;
1088 } else {
1089 if (off_start) {
1090 ret = fill_zero(inode, pg_start++, off_start,
1091 PAGE_SIZE - off_start);
1092 if (ret)
1093 return ret;
1094 }
1095 if (off_end) {
1096 ret = fill_zero(inode, pg_end, 0, off_end);
1097 if (ret)
1098 return ret;
1099 }
1100
1101 if (pg_start < pg_end) {
1102 loff_t blk_start, blk_end;
1103 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1104
1105 f2fs_balance_fs(sbi, true);
1106
1107 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1108 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1109
1110 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1111 filemap_invalidate_lock(inode->i_mapping);
1112
1113 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1114
1115 f2fs_lock_op(sbi);
1116 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1117 f2fs_unlock_op(sbi);
1118
1119 filemap_invalidate_unlock(inode->i_mapping);
1120 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1121 }
1122 }
1123
1124 return ret;
1125 }
1126
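/*
 * Helpers for collapse/insert range: read out the source block addresses
 * (__read_out_blkaddrs), move or copy them to the destination
 * (__clone_blkaddrs), and restore them on failure (__roll_back_blkaddrs).
 */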
1127 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1128 int *do_replace, pgoff_t off, pgoff_t len)
1129 {
1130 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1131 struct dnode_of_data dn;
1132 int ret, done, i;
1133
1134 next_dnode:
1135 set_new_dnode(&dn, inode, NULL, NULL, 0);
1136 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1137 if (ret && ret != -ENOENT) {
1138 return ret;
1139 } else if (ret == -ENOENT) {
1140 if (dn.max_level == 0)
1141 return -ENOENT;
1142 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1143 dn.ofs_in_node, len);
1144 blkaddr += done;
1145 do_replace += done;
1146 goto next;
1147 }
1148
1149 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1150 dn.ofs_in_node, len);
1151 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1152 *blkaddr = f2fs_data_blkaddr(&dn);
1153
1154 if (__is_valid_data_blkaddr(*blkaddr) &&
1155 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1156 DATA_GENERIC_ENHANCE)) {
1157 f2fs_put_dnode(&dn);
1158 return -EFSCORRUPTED;
1159 }
1160
1161 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1162
1163 if (f2fs_lfs_mode(sbi)) {
1164 f2fs_put_dnode(&dn);
1165 return -EOPNOTSUPP;
1166 }
1167
/* do not invalidate this block address */
1169 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1170 *do_replace = 1;
1171 }
1172 }
1173 f2fs_put_dnode(&dn);
1174 next:
1175 len -= done;
1176 off += done;
1177 if (len)
1178 goto next_dnode;
1179 return 0;
1180 }
1181
1182 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1183 int *do_replace, pgoff_t off, int len)
1184 {
1185 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1186 struct dnode_of_data dn;
1187 int ret, i;
1188
1189 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1190 if (*do_replace == 0)
1191 continue;
1192
1193 set_new_dnode(&dn, inode, NULL, NULL, 0);
1194 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1195 if (ret) {
1196 dec_valid_block_count(sbi, inode, 1);
1197 f2fs_invalidate_blocks(sbi, *blkaddr);
1198 } else {
1199 f2fs_update_data_blkaddr(&dn, *blkaddr);
1200 }
1201 f2fs_put_dnode(&dn);
1202 }
1203 return 0;
1204 }
1205
1206 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1207 block_t *blkaddr, int *do_replace,
1208 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1209 {
1210 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1211 pgoff_t i = 0;
1212 int ret;
1213
1214 while (i < len) {
1215 if (blkaddr[i] == NULL_ADDR && !full) {
1216 i++;
1217 continue;
1218 }
1219
1220 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1221 struct dnode_of_data dn;
1222 struct node_info ni;
1223 size_t new_size;
1224 pgoff_t ilen;
1225
1226 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1227 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1228 if (ret)
1229 return ret;
1230
1231 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1232 if (ret) {
1233 f2fs_put_dnode(&dn);
1234 return ret;
1235 }
1236
1237 ilen = min((pgoff_t)
1238 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1239 dn.ofs_in_node, len - i);
1240 do {
1241 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1242 f2fs_truncate_data_blocks_range(&dn, 1);
1243
1244 if (do_replace[i]) {
1245 f2fs_i_blocks_write(src_inode,
1246 1, false, false);
1247 f2fs_i_blocks_write(dst_inode,
1248 1, true, false);
1249 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1250 blkaddr[i], ni.version, true, false);
1251
1252 do_replace[i] = 0;
1253 }
1254 dn.ofs_in_node++;
1255 i++;
1256 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1257 if (dst_inode->i_size < new_size)
1258 f2fs_i_size_write(dst_inode, new_size);
1259 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1260
1261 f2fs_put_dnode(&dn);
1262 } else {
1263 struct page *psrc, *pdst;
1264
1265 psrc = f2fs_get_lock_data_page(src_inode,
1266 src + i, true);
1267 if (IS_ERR(psrc))
1268 return PTR_ERR(psrc);
1269 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1270 true);
1271 if (IS_ERR(pdst)) {
1272 f2fs_put_page(psrc, 1);
1273 return PTR_ERR(pdst);
1274 }
1275 memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
1276 set_page_dirty(pdst);
1277 f2fs_put_page(pdst, 1);
1278 f2fs_put_page(psrc, 1);
1279
1280 ret = f2fs_truncate_hole(src_inode,
1281 src + i, src + i + 1);
1282 if (ret)
1283 return ret;
1284 i++;
1285 }
1286 }
1287 return 0;
1288 }
1289
1290 static int __exchange_data_block(struct inode *src_inode,
1291 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1292 pgoff_t len, bool full)
1293 {
1294 block_t *src_blkaddr;
1295 int *do_replace;
1296 pgoff_t olen;
1297 int ret;
1298
1299 while (len) {
1300 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1301
1302 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1303 array_size(olen, sizeof(block_t)),
1304 GFP_NOFS);
1305 if (!src_blkaddr)
1306 return -ENOMEM;
1307
1308 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1309 array_size(olen, sizeof(int)),
1310 GFP_NOFS);
1311 if (!do_replace) {
1312 kvfree(src_blkaddr);
1313 return -ENOMEM;
1314 }
1315
1316 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1317 do_replace, src, olen);
1318 if (ret)
1319 goto roll_back;
1320
1321 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1322 do_replace, src, dst, olen, full);
1323 if (ret)
1324 goto roll_back;
1325
1326 src += olen;
1327 dst += olen;
1328 len -= olen;
1329
1330 kvfree(src_blkaddr);
1331 kvfree(do_replace);
1332 }
1333 return 0;
1334
1335 roll_back:
1336 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1337 kvfree(src_blkaddr);
1338 kvfree(do_replace);
1339 return ret;
1340 }
1341
1342 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1343 {
1344 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1345 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1346 pgoff_t start = offset >> PAGE_SHIFT;
1347 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1348 int ret;
1349
1350 f2fs_balance_fs(sbi, true);
1351
/* avoid gc operation during block exchange */
1353 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1354 filemap_invalidate_lock(inode->i_mapping);
1355
1356 f2fs_lock_op(sbi);
1357 f2fs_drop_extent_tree(inode);
1358 truncate_pagecache(inode, offset);
1359 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1360 f2fs_unlock_op(sbi);
1361
1362 filemap_invalidate_unlock(inode->i_mapping);
1363 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1364 return ret;
1365 }
1366
1367 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1368 {
1369 loff_t new_size;
1370 int ret;
1371
1372 if (offset + len >= i_size_read(inode))
1373 return -EINVAL;
1374
/* collapse range should be aligned to block size of f2fs */
1376 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1377 return -EINVAL;
1378
1379 ret = f2fs_convert_inline_inode(inode);
1380 if (ret)
1381 return ret;
1382
/* write out all dirty pages from offset */
1384 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1385 if (ret)
1386 return ret;
1387
1388 ret = f2fs_do_collapse(inode, offset, len);
1389 if (ret)
1390 return ret;
1391
/* write out all moved pages, if possible */
1393 filemap_invalidate_lock(inode->i_mapping);
1394 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1395 truncate_pagecache(inode, offset);
1396
1397 new_size = i_size_read(inode) - len;
1398 ret = f2fs_truncate_blocks(inode, new_size, true);
1399 filemap_invalidate_unlock(inode->i_mapping);
1400 if (!ret)
1401 f2fs_i_size_write(inode, new_size);
1402 return ret;
1403 }
1404
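/*
 * FALLOC_FL_ZERO_RANGE helper: reserve blocks for unallocated offsets in
 * [start, end) and turn already-written blocks into preallocated (NEW_ADDR)
 * ones so that reads return zeroes.
 */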
1405 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1406 pgoff_t end)
1407 {
1408 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1409 pgoff_t index = start;
1410 unsigned int ofs_in_node = dn->ofs_in_node;
1411 blkcnt_t count = 0;
1412 int ret;
1413
1414 for (; index < end; index++, dn->ofs_in_node++) {
1415 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1416 count++;
1417 }
1418
1419 dn->ofs_in_node = ofs_in_node;
1420 ret = f2fs_reserve_new_blocks(dn, count);
1421 if (ret)
1422 return ret;
1423
1424 dn->ofs_in_node = ofs_in_node;
1425 for (index = start; index < end; index++, dn->ofs_in_node++) {
1426 dn->data_blkaddr = f2fs_data_blkaddr(dn);
/*
 * f2fs_reserve_new_blocks() does not guarantee that the entire range
 * was allocated.
 */
1431 if (dn->data_blkaddr == NULL_ADDR) {
1432 ret = -ENOSPC;
1433 break;
1434 }
1435
1436 if (dn->data_blkaddr == NEW_ADDR)
1437 continue;
1438
1439 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1440 DATA_GENERIC_ENHANCE)) {
1441 ret = -EFSCORRUPTED;
1442 break;
1443 }
1444
1445 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1446 dn->data_blkaddr = NEW_ADDR;
1447 f2fs_set_data_blkaddr(dn);
1448 }
1449
1450 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1451
1452 return ret;
1453 }
1454
1455 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1456 int mode)
1457 {
1458 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1459 struct address_space *mapping = inode->i_mapping;
1460 pgoff_t index, pg_start, pg_end;
1461 loff_t new_size = i_size_read(inode);
1462 loff_t off_start, off_end;
1463 int ret = 0;
1464
1465 ret = inode_newsize_ok(inode, (len + offset));
1466 if (ret)
1467 return ret;
1468
1469 ret = f2fs_convert_inline_inode(inode);
1470 if (ret)
1471 return ret;
1472
1473 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1474 if (ret)
1475 return ret;
1476
1477 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1478 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1479
1480 off_start = offset & (PAGE_SIZE - 1);
1481 off_end = (offset + len) & (PAGE_SIZE - 1);
1482
1483 if (pg_start == pg_end) {
1484 ret = fill_zero(inode, pg_start, off_start,
1485 off_end - off_start);
1486 if (ret)
1487 return ret;
1488
1489 new_size = max_t(loff_t, new_size, offset + len);
1490 } else {
1491 if (off_start) {
1492 ret = fill_zero(inode, pg_start++, off_start,
1493 PAGE_SIZE - off_start);
1494 if (ret)
1495 return ret;
1496
1497 new_size = max_t(loff_t, new_size,
1498 (loff_t)pg_start << PAGE_SHIFT);
1499 }
1500
1501 for (index = pg_start; index < pg_end;) {
1502 struct dnode_of_data dn;
1503 unsigned int end_offset;
1504 pgoff_t end;
1505
1506 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1507 filemap_invalidate_lock(mapping);
1508
1509 truncate_pagecache_range(inode,
1510 (loff_t)index << PAGE_SHIFT,
1511 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1512
1513 f2fs_lock_op(sbi);
1514
1515 set_new_dnode(&dn, inode, NULL, NULL, 0);
1516 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1517 if (ret) {
1518 f2fs_unlock_op(sbi);
1519 filemap_invalidate_unlock(mapping);
1520 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1521 goto out;
1522 }
1523
1524 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1525 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1526
1527 ret = f2fs_do_zero_range(&dn, index, end);
1528 f2fs_put_dnode(&dn);
1529
1530 f2fs_unlock_op(sbi);
1531 filemap_invalidate_unlock(mapping);
1532 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1533
1534 f2fs_balance_fs(sbi, dn.node_changed);
1535
1536 if (ret)
1537 goto out;
1538
1539 index = end;
1540 new_size = max_t(loff_t, new_size,
1541 (loff_t)index << PAGE_SHIFT);
1542 }
1543
1544 if (off_end) {
1545 ret = fill_zero(inode, pg_end, 0, off_end);
1546 if (ret)
1547 goto out;
1548
1549 new_size = max_t(loff_t, new_size, offset + len);
1550 }
1551 }
1552
1553 out:
1554 if (new_size > i_size_read(inode)) {
1555 if (mode & FALLOC_FL_KEEP_SIZE)
1556 file_set_keep_isize(inode);
1557 else
1558 f2fs_i_size_write(inode, new_size);
1559 }
1560 return ret;
1561 }
1562
1563 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1564 {
1565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1566 struct address_space *mapping = inode->i_mapping;
1567 pgoff_t nr, pg_start, pg_end, delta, idx;
1568 loff_t new_size;
1569 int ret = 0;
1570
1571 new_size = i_size_read(inode) + len;
1572 ret = inode_newsize_ok(inode, new_size);
1573 if (ret)
1574 return ret;
1575
1576 if (offset >= i_size_read(inode))
1577 return -EINVAL;
1578
/* insert range should be aligned to block size of f2fs */
1580 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1581 return -EINVAL;
1582
1583 ret = f2fs_convert_inline_inode(inode);
1584 if (ret)
1585 return ret;
1586
1587 f2fs_balance_fs(sbi, true);
1588
1589 filemap_invalidate_lock(mapping);
1590 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1591 filemap_invalidate_unlock(mapping);
1592 if (ret)
1593 return ret;
1594
/* write out all dirty pages from offset */
1596 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1597 if (ret)
1598 return ret;
1599
1600 pg_start = offset >> PAGE_SHIFT;
1601 pg_end = (offset + len) >> PAGE_SHIFT;
1602 delta = pg_end - pg_start;
1603 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1604
/* avoid gc operation during block exchange */
1606 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1607 filemap_invalidate_lock(mapping);
1608 truncate_pagecache(inode, offset);
1609
1610 while (!ret && idx > pg_start) {
1611 nr = idx - pg_start;
1612 if (nr > delta)
1613 nr = delta;
1614 idx -= nr;
1615
1616 f2fs_lock_op(sbi);
1617 f2fs_drop_extent_tree(inode);
1618
1619 ret = __exchange_data_block(inode, inode, idx,
1620 idx + delta, nr, false);
1621 f2fs_unlock_op(sbi);
1622 }
1623 filemap_invalidate_unlock(mapping);
1624 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1625
/* write out all moved pages, if possible */
1627 filemap_invalidate_lock(mapping);
1628 filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
1629 truncate_pagecache(inode, offset);
1630 filemap_invalidate_unlock(mapping);
1631
1632 if (!ret)
1633 f2fs_i_size_write(inode, new_size);
1634 return ret;
1635 }
1636
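/*
 * Default fallocate() mode: preallocate blocks for [offset, offset + len).
 * Pinned files are expanded section by section from CURSEG_COLD_DATA_PINNED
 * so that their blocks come from dedicated sections and are never migrated
 * by GC.
 */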
1637 static int expand_inode_data(struct inode *inode, loff_t offset,
1638 loff_t len, int mode)
1639 {
1640 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1641 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1642 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1643 .m_may_create = true };
1644 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1645 .init_gc_type = FG_GC,
1646 .should_migrate_blocks = false,
1647 .err_gc_skipped = true,
1648 .nr_free_secs = 0 };
1649 pgoff_t pg_start, pg_end;
1650 loff_t new_size = i_size_read(inode);
1651 loff_t off_end;
1652 block_t expanded = 0;
1653 int err;
1654
1655 err = inode_newsize_ok(inode, (len + offset));
1656 if (err)
1657 return err;
1658
1659 err = f2fs_convert_inline_inode(inode);
1660 if (err)
1661 return err;
1662
1663 f2fs_balance_fs(sbi, true);
1664
1665 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1666 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1667 off_end = (offset + len) & (PAGE_SIZE - 1);
1668
1669 map.m_lblk = pg_start;
1670 map.m_len = pg_end - pg_start;
1671 if (off_end)
1672 map.m_len++;
1673
1674 if (!map.m_len)
1675 return 0;
1676
1677 if (f2fs_is_pinned_file(inode)) {
1678 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
1679 block_t sec_len = roundup(map.m_len, sec_blks);
1680
1681 map.m_len = sec_blks;
1682 next_alloc:
1683 if (has_not_enough_free_secs(sbi, 0,
1684 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1685 f2fs_down_write(&sbi->gc_lock);
1686 err = f2fs_gc(sbi, &gc_control);
1687 if (err && err != -ENODATA)
1688 goto out_err;
1689 }
1690
1691 f2fs_down_write(&sbi->pin_sem);
1692
1693 f2fs_lock_op(sbi);
1694 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1695 f2fs_unlock_op(sbi);
1696
1697 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1698 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1699 file_dont_truncate(inode);
1700
1701 f2fs_up_write(&sbi->pin_sem);
1702
1703 expanded += map.m_len;
1704 sec_len -= map.m_len;
1705 map.m_lblk += map.m_len;
1706 if (!err && sec_len)
1707 goto next_alloc;
1708
1709 map.m_len = expanded;
1710 } else {
1711 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1712 expanded = map.m_len;
1713 }
1714 out_err:
1715 if (err) {
1716 pgoff_t last_off;
1717
1718 if (!expanded)
1719 return err;
1720
1721 last_off = pg_start + expanded - 1;
1722
/* update new size to the failed position */
1724 new_size = (last_off == pg_end) ? offset + len :
1725 (loff_t)(last_off + 1) << PAGE_SHIFT;
1726 } else {
1727 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1728 }
1729
1730 if (new_size > i_size_read(inode)) {
1731 if (mode & FALLOC_FL_KEEP_SIZE)
1732 file_set_keep_isize(inode);
1733 else
1734 f2fs_i_size_write(inode, new_size);
1735 }
1736
1737 return err;
1738 }
1739
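/*
 * fallocate() entry point, dispatching on the requested mode. A minimal
 * userspace sketch (illustrative only, error handling omitted):
 *
 *	fallocate(fd, 0, 0, 1 << 20);                       /\* preallocate *\/
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  off, len);                                /\* punch hole *\/
 */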
1740 static long f2fs_fallocate(struct file *file, int mode,
1741 loff_t offset, loff_t len)
1742 {
1743 struct inode *inode = file_inode(file);
1744 long ret = 0;
1745
1746 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1747 return -EIO;
1748 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1749 return -ENOSPC;
1750 if (!f2fs_is_compress_backend_ready(inode))
1751 return -EOPNOTSUPP;
1752
/* f2fs only supports ->fallocate for regular files */
1754 if (!S_ISREG(inode->i_mode))
1755 return -EINVAL;
1756
1757 if (IS_ENCRYPTED(inode) &&
1758 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1759 return -EOPNOTSUPP;
1760
/*
 * Compressed and pinned files do not support hole punching, collapse,
 * zero range or insert range, since their on-disk layout must stay
 * stable (pinned blocks may be used by applications directly).
 */
1765 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1766 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1767 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1768 return -EOPNOTSUPP;
1769
1770 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1771 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1772 FALLOC_FL_INSERT_RANGE))
1773 return -EOPNOTSUPP;
1774
1775 inode_lock(inode);
1776
1777 ret = file_modified(file);
1778 if (ret)
1779 goto out;
1780
1781 if (mode & FALLOC_FL_PUNCH_HOLE) {
1782 if (offset >= inode->i_size)
1783 goto out;
1784
1785 ret = punch_hole(inode, offset, len);
1786 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1787 ret = f2fs_collapse_range(inode, offset, len);
1788 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1789 ret = f2fs_zero_range(inode, offset, len, mode);
1790 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1791 ret = f2fs_insert_range(inode, offset, len);
1792 } else {
1793 ret = expand_inode_data(inode, offset, len, mode);
1794 }
1795
1796 if (!ret) {
1797 inode->i_mtime = inode->i_ctime = current_time(inode);
1798 f2fs_mark_inode_dirty_sync(inode, false);
1799 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1800 }
1801
1802 out:
1803 inode_unlock(inode);
1804
1805 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1806 return ret;
1807 }
1808
1809 static int f2fs_release_file(struct inode *inode, struct file *filp)
1810 {
/*
 * f2fs_release_file() is called on every close(); only abort an
 * in-flight atomic write when the last writer closes the file.
 */
1815 if (!(filp->f_mode & FMODE_WRITE) ||
1816 atomic_read(&inode->i_writecount) != 1)
1817 return 0;
1818
1819 f2fs_abort_atomic_write(inode, true);
1820 return 0;
1821 }
1822
1823 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1824 {
1825 struct inode *inode = file_inode(file);
1826
/*
 * If the process doing a transaction crashes, we should roll back.
 * Otherwise, other readers/writers could see a corrupted database until
 * all the writers close the file. Since this has to be done before
 * dropping the file lock, do it in ->flush.
 */
1833 if (F2FS_I(inode)->atomic_write_task == current)
1834 f2fs_abort_atomic_write(inode, true);
1835 return 0;
1836 }
1837
1838 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1839 {
1840 struct f2fs_inode_info *fi = F2FS_I(inode);
1841 u32 masked_flags = fi->i_flags & mask;
1842
/* only consider flags covered by @mask */
1844 iflags &= mask;
1845
/* Is it a quota file? Do not allow user to mess with it */
1847 if (IS_NOQUOTA(inode))
1848 return -EPERM;
1849
1850 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1851 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1852 return -EOPNOTSUPP;
1853 if (!f2fs_empty_dir(inode))
1854 return -ENOTEMPTY;
1855 }
1856
1857 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1858 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1859 return -EOPNOTSUPP;
1860 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1861 return -EINVAL;
1862 }
1863
1864 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1865 if (masked_flags & F2FS_COMPR_FL) {
1866 if (!f2fs_disable_compressed_file(inode))
1867 return -EINVAL;
1868 } else {
1869 if (!f2fs_may_compress(inode))
1870 return -EINVAL;
1871 if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
1872 return -EINVAL;
1873 if (set_compress_context(inode))
1874 return -EOPNOTSUPP;
1875 }
1876 }
1877
1878 fi->i_flags = iflags | (fi->i_flags & ~mask);
1879 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1880 (fi->i_flags & F2FS_NOCOMP_FL));
1881
1882 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1883 set_inode_flag(inode, FI_PROJ_INHERIT);
1884 else
1885 clear_inode_flag(inode, FI_PROJ_INHERIT);
1886
1887 inode->i_ctime = current_time(inode);
1888 f2fs_set_inode_flags(inode);
1889 f2fs_mark_inode_dirty_sync(inode, true);
1890 return 0;
1891 }
1892
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To make it settable via FS_IOC_SETFLAGS, also add its
 * FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags values used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */
1905 static const struct {
1906 u32 iflag;
1907 u32 fsflag;
1908 } f2fs_fsflags_map[] = {
1909 { F2FS_COMPR_FL, FS_COMPR_FL },
1910 { F2FS_SYNC_FL, FS_SYNC_FL },
1911 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1912 { F2FS_APPEND_FL, FS_APPEND_FL },
1913 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1914 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1915 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1916 { F2FS_INDEX_FL, FS_INDEX_FL },
1917 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1918 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1919 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1920 };
1921
1922 #define F2FS_GETTABLE_FS_FL ( \
1923 FS_COMPR_FL | \
1924 FS_SYNC_FL | \
1925 FS_IMMUTABLE_FL | \
1926 FS_APPEND_FL | \
1927 FS_NODUMP_FL | \
1928 FS_NOATIME_FL | \
1929 FS_NOCOMP_FL | \
1930 FS_INDEX_FL | \
1931 FS_DIRSYNC_FL | \
1932 FS_PROJINHERIT_FL | \
1933 FS_ENCRYPT_FL | \
1934 FS_INLINE_DATA_FL | \
1935 FS_NOCOW_FL | \
1936 FS_VERITY_FL | \
1937 FS_CASEFOLD_FL)
1938
1939 #define F2FS_SETTABLE_FS_FL ( \
1940 FS_COMPR_FL | \
1941 FS_SYNC_FL | \
1942 FS_IMMUTABLE_FL | \
1943 FS_APPEND_FL | \
1944 FS_NODUMP_FL | \
1945 FS_NOATIME_FL | \
1946 FS_NOCOMP_FL | \
1947 FS_DIRSYNC_FL | \
1948 FS_PROJINHERIT_FL | \
1949 FS_CASEFOLD_FL)
1950
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1952 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1953 {
1954 u32 fsflags = 0;
1955 int i;
1956
1957 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1958 if (iflags & f2fs_fsflags_map[i].iflag)
1959 fsflags |= f2fs_fsflags_map[i].fsflag;
1960
1961 return fsflags;
1962 }
1963
/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1965 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1966 {
1967 u32 iflags = 0;
1968 int i;
1969
1970 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1971 if (fsflags & f2fs_fsflags_map[i].fsflag)
1972 iflags |= f2fs_fsflags_map[i].iflag;
1973
1974 return iflags;
1975 }
1976
1977 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
1978 {
1979 struct inode *inode = file_inode(filp);
1980
1981 return put_user(inode->i_generation, (int __user *)arg);
1982 }
1983
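/*
 * F2FS_IOC_START_ATOMIC_WRITE: writes after this ioctl are staged via a COW
 * inode and only reach the original file when F2FS_IOC_COMMIT_ATOMIC_WRITE
 * succeeds; an abort or crash leaves the old contents intact. Illustrative
 * userspace sequence (error handling omitted):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */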
1984 static int f2fs_ioc_start_atomic_write(struct file *filp)
1985 {
1986 struct inode *inode = file_inode(filp);
1987 struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
1988 struct f2fs_inode_info *fi = F2FS_I(inode);
1989 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1990 struct inode *pinode;
1991 int ret;
1992
1993 if (!inode_owner_or_capable(mnt_userns, inode))
1994 return -EACCES;
1995
1996 if (!S_ISREG(inode->i_mode))
1997 return -EINVAL;
1998
1999 if (filp->f_flags & O_DIRECT)
2000 return -EINVAL;
2001
2002 ret = mnt_want_write_file(filp);
2003 if (ret)
2004 return ret;
2005
2006 inode_lock(inode);
2007
2008 if (!f2fs_disable_compressed_file(inode)) {
2009 ret = -EINVAL;
2010 goto out;
2011 }
2012
2013 if (f2fs_is_atomic_file(inode))
2014 goto out;
2015
2016 ret = f2fs_convert_inline_inode(inode);
2017 if (ret)
2018 goto out;
2019
2020 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
2021
/*
 * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly by
 * f2fs_is_atomic_file().
 */
2026 if (get_dirty_pages(inode))
2027 f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2028 inode->i_ino, get_dirty_pages(inode));
2029 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2030 if (ret) {
2031 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2032 goto out;
2033 }
2034
/* create a COW inode under the parent for staging atomic writes */
2036 pinode = f2fs_iget(inode->i_sb, fi->i_pino);
2037 if (IS_ERR(pinode)) {
2038 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2039 ret = PTR_ERR(pinode);
2040 goto out;
2041 }
2042
2043 ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
2044 iput(pinode);
2045 if (ret) {
2046 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2047 goto out;
2048 }
2049 f2fs_i_size_write(fi->cow_inode, i_size_read(inode));
2050
2051 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2052 sbi->atomic_files++;
2053 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2054
2055 set_inode_flag(inode, FI_ATOMIC_FILE);
2056 set_inode_flag(fi->cow_inode, FI_COW_FILE);
2057 clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
2058 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
2059
2060 f2fs_update_time(sbi, REQ_TIME);
2061 fi->atomic_write_task = current;
2062 stat_update_max_atomic_write(inode);
2063 fi->atomic_write_cnt = 0;
2064 out:
2065 inode_unlock(inode);
2066 mnt_drop_write_file(filp);
2067 return ret;
2068 }
2069
2070 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2071 {
2072 struct inode *inode = file_inode(filp);
2073 struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
2074 int ret;
2075
2076 if (!inode_owner_or_capable(mnt_userns, inode))
2077 return -EACCES;
2078
2079 ret = mnt_want_write_file(filp);
2080 if (ret)
2081 return ret;
2082
2083 f2fs_balance_fs(F2FS_I_SB(inode), true);
2084
2085 inode_lock(inode);
2086
2087 if (f2fs_is_atomic_file(inode)) {
2088 ret = f2fs_commit_atomic_write(inode);
2089 if (ret)
2090 goto unlock_out;
2091
2092 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2093 if (!ret)
2094 f2fs_abort_atomic_write(inode, false);
2095 } else {
2096 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2097 }
2098 unlock_out:
2099 inode_unlock(inode);
2100 mnt_drop_write_file(filp);
2101 return ret;
2102 }
2103
2104 static int f2fs_ioc_abort_atomic_write(struct file *filp)
2105 {
2106 struct inode *inode = file_inode(filp);
2107 struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
2108 int ret;
2109
2110 if (!inode_owner_or_capable(mnt_userns, inode))
2111 return -EACCES;
2112
2113 ret = mnt_want_write_file(filp);
2114 if (ret)
2115 return ret;
2116
2117 inode_lock(inode);
2118
2119 f2fs_abort_atomic_write(inode, true);
2120
2121 inode_unlock(inode);
2122
2123 mnt_drop_write_file(filp);
2124 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2125 return ret;
2126 }
2127
2128 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2129 {
2130 struct inode *inode = file_inode(filp);
2131 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2132 struct super_block *sb = sbi->sb;
2133 __u32 in;
2134 int ret = 0;
2135
2136 if (!capable(CAP_SYS_ADMIN))
2137 return -EPERM;
2138
2139 if (get_user(in, (__u32 __user *)arg))
2140 return -EFAULT;
2141
2142 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2143 ret = mnt_want_write_file(filp);
2144 if (ret) {
2145 if (ret == -EROFS) {
2146 ret = 0;
2147 f2fs_stop_checkpoint(sbi, false);
2148 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2149 trace_f2fs_shutdown(sbi, in, ret);
2150 }
2151 return ret;
2152 }
2153 }
2154
2155 switch (in) {
2156 case F2FS_GOING_DOWN_FULLSYNC:
2157 ret = freeze_bdev(sb->s_bdev);
2158 if (ret)
2159 goto out;
2160 f2fs_stop_checkpoint(sbi, false);
2161 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2162 thaw_bdev(sb->s_bdev);
2163 break;
2164 case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
2166 ret = f2fs_sync_fs(sb, 1);
2167 if (ret)
2168 goto out;
2169 f2fs_stop_checkpoint(sbi, false);
2170 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2171 break;
2172 case F2FS_GOING_DOWN_NOSYNC:
2173 f2fs_stop_checkpoint(sbi, false);
2174 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2175 break;
2176 case F2FS_GOING_DOWN_METAFLUSH:
2177 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2178 f2fs_stop_checkpoint(sbi, false);
2179 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2180 break;
2181 case F2FS_GOING_DOWN_NEED_FSCK:
2182 set_sbi_flag(sbi, SBI_NEED_FSCK);
2183 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2184 set_sbi_flag(sbi, SBI_IS_DIRTY);
/* do checkpoint only */
2186 ret = f2fs_sync_fs(sb, 1);
2187 goto out;
2188 default:
2189 ret = -EINVAL;
2190 goto out;
2191 }
2192
2193 f2fs_stop_gc_thread(sbi);
2194 f2fs_stop_discard_thread(sbi);
2195
2196 f2fs_drop_discard_cmd(sbi);
2197 clear_opt(sbi, DISCARD);
2198
2199 f2fs_update_time(sbi, REQ_TIME);
2200 out:
2201 if (in != F2FS_GOING_DOWN_FULLSYNC)
2202 mnt_drop_write_file(filp);
2203
2204 trace_f2fs_shutdown(sbi, in, ret);
2205
2206 return ret;
2207 }
2208
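/* FITRIM: discard free space in the given range, honoring the device discard granularity. */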
2209 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2210 {
2211 struct inode *inode = file_inode(filp);
2212 struct super_block *sb = inode->i_sb;
2213 struct fstrim_range range;
2214 int ret;
2215
2216 if (!capable(CAP_SYS_ADMIN))
2217 return -EPERM;
2218
2219 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2220 return -EOPNOTSUPP;
2221
2222 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2223 sizeof(range)))
2224 return -EFAULT;
2225
2226 ret = mnt_want_write_file(filp);
2227 if (ret)
2228 return ret;
2229
2230 range.minlen = max((unsigned int)range.minlen,
2231 bdev_discard_granularity(sb->s_bdev));
2232 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2233 mnt_drop_write_file(filp);
2234 if (ret < 0)
2235 return ret;
2236
2237 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2238 sizeof(range)))
2239 return -EFAULT;
2240 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2241 return 0;
2242 }
2243
2244 static bool uuid_is_nonzero(__u8 u[16])
2245 {
2246 int i;
2247
2248 for (i = 0; i < 16; i++)
2249 if (u[i])
2250 return true;
2251 return false;
2252 }
2253
2254 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2255 {
2256 struct inode *inode = file_inode(filp);
2257
2258 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2259 return -EOPNOTSUPP;
2260
2261 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2262
2263 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2264 }
2265
2266 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2267 {
2268 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2269 return -EOPNOTSUPP;
2270 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2271 }
2272
2273 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2274 {
2275 struct inode *inode = file_inode(filp);
2276 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2277 int err;
2278
2279 if (!f2fs_sb_has_encrypt(sbi))
2280 return -EOPNOTSUPP;
2281
2282 err = mnt_want_write_file(filp);
2283 if (err)
2284 return err;
2285
2286 f2fs_down_write(&sbi->sb_lock);
2287
2288 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2289 goto got_it;
2290
2291 /* update superblock with uuid */
2292 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2293
2294 err = f2fs_commit_super(sbi, false);
2295 if (err) {
2296 /* undo new data */
2297 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2298 goto out_err;
2299 }
2300 got_it:
2301 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2302 16))
2303 err = -EFAULT;
2304 out_err:
2305 f2fs_up_write(&sbi->sb_lock);
2306 mnt_drop_write_file(filp);
2307 return err;
2308 }
2309
2310 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2311 unsigned long arg)
2312 {
2313 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2314 return -EOPNOTSUPP;
2315
2316 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2317 }
2318
2319 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2320 {
2321 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2322 return -EOPNOTSUPP;
2323
2324 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2325 }
2326
2327 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2328 {
2329 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2330 return -EOPNOTSUPP;
2331
2332 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2333 }
2334
2335 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2336 unsigned long arg)
2337 {
2338 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2339 return -EOPNOTSUPP;
2340
2341 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2342 }
2343
2344 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2345 unsigned long arg)
2346 {
2347 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2348 return -EOPNOTSUPP;
2349
2350 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2351 }
2352
2353 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2354 {
2355 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2356 return -EOPNOTSUPP;
2357
2358 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2359 }
2360
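/*
 * F2FS_IOC_GARBAGE_COLLECT: run one round of GC; a non-zero argument requests
 * synchronous foreground GC and blocks on gc_lock instead of trylocking.
 */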
2361 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2362 {
2363 struct inode *inode = file_inode(filp);
2364 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2365 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
2366 .no_bg_gc = false,
2367 .should_migrate_blocks = false,
2368 .nr_free_secs = 0 };
2369 __u32 sync;
2370 int ret;
2371
2372 if (!capable(CAP_SYS_ADMIN))
2373 return -EPERM;
2374
2375 if (get_user(sync, (__u32 __user *)arg))
2376 return -EFAULT;
2377
2378 if (f2fs_readonly(sbi->sb))
2379 return -EROFS;
2380
2381 ret = mnt_want_write_file(filp);
2382 if (ret)
2383 return ret;
2384
2385 if (!sync) {
2386 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2387 ret = -EBUSY;
2388 goto out;
2389 }
2390 } else {
2391 f2fs_down_write(&sbi->gc_lock);
2392 }
2393
2394 gc_control.init_gc_type = sync ? FG_GC : BG_GC;
2395 gc_control.err_gc_skipped = sync;
2396 ret = f2fs_gc(sbi, &gc_control);
2397 out:
2398 mnt_drop_write_file(filp);
2399 return ret;
2400 }
2401
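/*
 * F2FS_IOC_GARBAGE_COLLECT_RANGE: garbage-collect the sections that cover
 * [start, start + len), advancing one section per GC pass.
 */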
2402 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2403 {
2404 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2405 struct f2fs_gc_control gc_control = {
2406 .init_gc_type = range->sync ? FG_GC : BG_GC,
2407 .no_bg_gc = false,
2408 .should_migrate_blocks = false,
2409 .err_gc_skipped = range->sync,
2410 .nr_free_secs = 0 };
2411 u64 end;
2412 int ret;
2413
2414 if (!capable(CAP_SYS_ADMIN))
2415 return -EPERM;
2416 if (f2fs_readonly(sbi->sb))
2417 return -EROFS;
2418
2419 end = range->start + range->len;
2420 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2421 end >= MAX_BLKADDR(sbi))
2422 return -EINVAL;
2423
2424 ret = mnt_want_write_file(filp);
2425 if (ret)
2426 return ret;
2427
2428 do_more:
2429 if (!range->sync) {
2430 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2431 ret = -EBUSY;
2432 goto out;
2433 }
2434 } else {
2435 f2fs_down_write(&sbi->gc_lock);
2436 }
2437
2438 gc_control.victim_segno = GET_SEGNO(sbi, range->start);
2439 ret = f2fs_gc(sbi, &gc_control);
2440 if (ret) {
2441 if (ret == -EBUSY)
2442 ret = -EAGAIN;
2443 goto out;
2444 }
2445 range->start += CAP_BLKS_PER_SEC(sbi);
2446 if (range->start <= end)
2447 goto do_more;
2448 out:
2449 mnt_drop_write_file(filp);
2450 return ret;
2451 }
2452
2453 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2454 {
2455 struct f2fs_gc_range range;
2456
2457 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2458 sizeof(range)))
2459 return -EFAULT;
2460 return __f2fs_ioc_gc_range(filp, &range);
2461 }
2462
2463 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2464 {
2465 struct inode *inode = file_inode(filp);
2466 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2467 int ret;
2468
2469 if (!capable(CAP_SYS_ADMIN))
2470 return -EPERM;
2471
2472 if (f2fs_readonly(sbi->sb))
2473 return -EROFS;
2474
2475 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2476 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2477 return -EINVAL;
2478 }
2479
2480 ret = mnt_want_write_file(filp);
2481 if (ret)
2482 return ret;
2483
2484 ret = f2fs_sync_fs(sbi->sb, 1);
2485
2486 mnt_drop_write_file(filp);
2487 return ret;
2488 }
2489
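/*
 * Defragment the given byte range by redirtying its blocks so that the
 * following writeback reallocates them contiguously (out-of-place).
 */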
2490 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2491 struct file *filp,
2492 struct f2fs_defragment *range)
2493 {
2494 struct inode *inode = file_inode(filp);
2495 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2496 .m_seg_type = NO_CHECK_TYPE,
2497 .m_may_create = false };
2498 struct extent_info ei = {0, 0, 0};
2499 pgoff_t pg_start, pg_end, next_pgofs;
2500 unsigned int blk_per_seg = sbi->blocks_per_seg;
2501 unsigned int total = 0, sec_num;
2502 block_t blk_end = 0;
2503 bool fragmented = false;
2504 int err;
2505
2506 pg_start = range->start >> PAGE_SHIFT;
2507 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2508
2509 f2fs_balance_fs(sbi, true);
2510
2511 inode_lock(inode);
2512
2513 /* if in-place-update policy is enabled, don't waste time here */
2514 set_inode_flag(inode, FI_OPU_WRITE);
2515 if (f2fs_should_update_inplace(inode, NULL)) {
2516 err = -EINVAL;
2517 goto out;
2518 }
2519
2520 /* writeback all dirty pages in the range */
2521 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2522 range->start + range->len - 1);
2523 if (err)
2524 goto out;
2525
2526 /*
2527  * lookup mapping info in extent cache, skip defragmenting if physical
2528  * block addresses are continuous.
2529  */
2530 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2531 if (ei.fofs + ei.len >= pg_end)
2532 goto out;
2533 }
2534
2535 map.m_lblk = pg_start;
2536 map.m_next_pgofs = &next_pgofs;
2537
2538 /*
2539  * lookup mapping info in dnode page cache, skip defragmenting if all
2540  * physical block addresses are continuous even if there are hole(s)
2541  * in logical blocks.
2542  */
2543 while (map.m_lblk < pg_end) {
2544 map.m_len = pg_end - map.m_lblk;
2545 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2546 if (err)
2547 goto out;
2548
2549 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2550 map.m_lblk = next_pgofs;
2551 continue;
2552 }
2553
2554 if (blk_end && blk_end != map.m_pblk)
2555 fragmented = true;
2556
2557 /* record total count of blocks that we're going to move */
2558 total += map.m_len;
2559
2560 blk_end = map.m_pblk + map.m_len;
2561
2562 map.m_lblk += map.m_len;
2563 }
2564
2565 if (!fragmented) {
2566 total = 0;
2567 goto out;
2568 }
2569
2570 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2571
2572 /*
2573  * make sure there are enough free section for LFS allocation, this can
2574  * avoid defragment running in SSR mode when free section are allocated
2575  * intensively
2576  */
2577 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2578 err = -EAGAIN;
2579 goto out;
2580 }
2581
2582 map.m_lblk = pg_start;
2583 map.m_len = pg_end - pg_start;
2584 total = 0;
2585
2586 while (map.m_lblk < pg_end) {
2587 pgoff_t idx;
2588 int cnt = 0;
2589
2590 do_map:
2591 map.m_len = pg_end - map.m_lblk;
2592 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2593 if (err)
2594 goto clear_out;
2595
2596 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2597 map.m_lblk = next_pgofs;
2598 goto check;
2599 }
2600
2601 set_inode_flag(inode, FI_SKIP_WRITES);
2602
2603 idx = map.m_lblk;
2604 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2605 struct page *page;
2606
2607 page = f2fs_get_lock_data_page(inode, idx, true);
2608 if (IS_ERR(page)) {
2609 err = PTR_ERR(page);
2610 goto clear_out;
2611 }
2612
2613 set_page_dirty(page);
2614 set_page_private_gcing(page);
2615 f2fs_put_page(page, 1);
2616
2617 idx++;
2618 cnt++;
2619 total++;
2620 }
2621
2622 map.m_lblk = idx;
2623 check:
2624 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2625 goto do_map;
2626
2627 clear_inode_flag(inode, FI_SKIP_WRITES);
2628
2629 err = filemap_fdatawrite(inode->i_mapping);
2630 if (err)
2631 goto out;
2632 }
2633 clear_out:
2634 clear_inode_flag(inode, FI_SKIP_WRITES);
2635 out:
2636 clear_inode_flag(inode, FI_OPU_WRITE);
2637 inode_unlock(inode);
2638 if (!err)
2639 range->len = (u64)total << PAGE_SHIFT;
2640 return err;
2641 }
2642
2643 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2644 {
2645 struct inode *inode = file_inode(filp);
2646 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2647 struct f2fs_defragment range;
2648 int err;
2649
2650 if (!capable(CAP_SYS_ADMIN))
2651 return -EPERM;
2652
2653 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2654 return -EINVAL;
2655
2656 if (f2fs_readonly(sbi->sb))
2657 return -EROFS;
2658
2659 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2660 sizeof(range)))
2661 return -EFAULT;
2662
2663 /* verify that offset and size are block-size aligned */
2664 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2665 return -EINVAL;
2666
2667 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2668 max_file_blocks(inode)))
2669 return -EINVAL;
2670
2671 err = mnt_want_write_file(filp);
2672 if (err)
2673 return err;
2674
2675 err = f2fs_defragment_range(sbi, filp, &range);
2676 mnt_drop_write_file(filp);
2677
2678 f2fs_update_time(sbi, REQ_TIME);
2679 if (err < 0)
2680 return err;
2681
2682 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2683 sizeof(range)))
2684 return -EFAULT;
2685
2686 return 0;
2687 }
2688
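/*
 * Move block-aligned data from file_in at pos_in to file_out at pos_out;
 * both files must be regular, unencrypted, and on the same f2fs instance.
 */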
2689 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2690 struct file *file_out, loff_t pos_out, size_t len)
2691 {
2692 struct inode *src = file_inode(file_in);
2693 struct inode *dst = file_inode(file_out);
2694 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2695 size_t olen = len, dst_max_i_size = 0;
2696 size_t dst_osize;
2697 int ret;
2698
2699 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2700 src->i_sb != dst->i_sb)
2701 return -EXDEV;
2702
2703 if (unlikely(f2fs_readonly(src->i_sb)))
2704 return -EROFS;
2705
2706 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2707 return -EINVAL;
2708
2709 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2710 return -EOPNOTSUPP;
2711
2712 if (pos_out < 0 || pos_in < 0)
2713 return -EINVAL;
2714
2715 if (src == dst) {
2716 if (pos_in == pos_out)
2717 return 0;
2718 if (pos_out > pos_in && pos_out < pos_in + len)
2719 return -EINVAL;
2720 }
2721
2722 inode_lock(src);
2723 if (src != dst) {
2724 ret = -EBUSY;
2725 if (!inode_trylock(dst))
2726 goto out;
2727 }
2728
2729 ret = -EINVAL;
2730 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2731 goto out_unlock;
2732 if (len == 0)
2733 olen = len = src->i_size - pos_in;
2734 if (pos_in + len == src->i_size)
2735 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2736 if (len == 0) {
2737 ret = 0;
2738 goto out_unlock;
2739 }
2740
2741 dst_osize = dst->i_size;
2742 if (pos_out + olen > dst->i_size)
2743 dst_max_i_size = pos_out + olen;
2744
2745 /* verify the end result is block aligned */
2746 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2747 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2748 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2749 goto out_unlock;
2750
2751 ret = f2fs_convert_inline_inode(src);
2752 if (ret)
2753 goto out_unlock;
2754
2755 ret = f2fs_convert_inline_inode(dst);
2756 if (ret)
2757 goto out_unlock;
2758
2759 /* write out all dirty pages from offset */
2760 ret = filemap_write_and_wait_range(src->i_mapping,
2761 pos_in, pos_in + len);
2762 if (ret)
2763 goto out_unlock;
2764
2765 ret = filemap_write_and_wait_range(dst->i_mapping,
2766 pos_out, pos_out + len);
2767 if (ret)
2768 goto out_unlock;
2769
2770 f2fs_balance_fs(sbi, true);
2771
2772 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2773 if (src != dst) {
2774 ret = -EBUSY;
2775 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2776 goto out_src;
2777 }
2778
2779 f2fs_lock_op(sbi);
2780 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2781 pos_out >> F2FS_BLKSIZE_BITS,
2782 len >> F2FS_BLKSIZE_BITS, false);
2783
2784 if (!ret) {
2785 if (dst_max_i_size)
2786 f2fs_i_size_write(dst, dst_max_i_size);
2787 else if (dst_osize != dst->i_size)
2788 f2fs_i_size_write(dst, dst_osize);
2789 }
2790 f2fs_unlock_op(sbi);
2791
2792 if (src != dst)
2793 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2794 out_src:
2795 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2796 out_unlock:
2797 if (src != dst)
2798 inode_unlock(dst);
2799 out:
2800 inode_unlock(src);
2801 return ret;
2802 }
2803
2804 static int __f2fs_ioc_move_range(struct file *filp,
2805 struct f2fs_move_range *range)
2806 {
2807 struct fd dst;
2808 int err;
2809
2810 if (!(filp->f_mode & FMODE_READ) ||
2811 !(filp->f_mode & FMODE_WRITE))
2812 return -EBADF;
2813
2814 dst = fdget(range->dst_fd);
2815 if (!dst.file)
2816 return -EBADF;
2817
2818 if (!(dst.file->f_mode & FMODE_WRITE)) {
2819 err = -EBADF;
2820 goto err_out;
2821 }
2822
2823 err = mnt_want_write_file(filp);
2824 if (err)
2825 goto err_out;
2826
2827 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2828 range->pos_out, range->len);
2829
2830 mnt_drop_write_file(filp);
2831 err_out:
2832 fdput(dst);
2833 return err;
2834 }
2835
2836 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2837 {
2838 struct f2fs_move_range range;
2839
2840 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2841 sizeof(range)))
2842 return -EFAULT;
2843 return __f2fs_ioc_move_range(filp, &range);
2844 }
2845
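/*
 * F2FS_IOC_FLUSH_DEVICE: migrate up to range.segments segments off the given
 * device by running foreground GC with block migration enabled.
 */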
2846 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2847 {
2848 struct inode *inode = file_inode(filp);
2849 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2850 struct sit_info *sm = SIT_I(sbi);
2851 unsigned int start_segno = 0, end_segno = 0;
2852 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2853 struct f2fs_flush_device range;
2854 struct f2fs_gc_control gc_control = {
2855 .init_gc_type = FG_GC,
2856 .should_migrate_blocks = true,
2857 .err_gc_skipped = true,
2858 .nr_free_secs = 0 };
2859 int ret;
2860
2861 if (!capable(CAP_SYS_ADMIN))
2862 return -EPERM;
2863
2864 if (f2fs_readonly(sbi->sb))
2865 return -EROFS;
2866
2867 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2868 return -EINVAL;
2869
2870 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2871 sizeof(range)))
2872 return -EFAULT;
2873
2874 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2875 __is_large_section(sbi)) {
2876 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2877 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2878 return -EINVAL;
2879 }
2880
2881 ret = mnt_want_write_file(filp);
2882 if (ret)
2883 return ret;
2884
2885 if (range.dev_num != 0)
2886 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2887 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2888
2889 start_segno = sm->last_victim[FLUSH_DEVICE];
2890 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2891 start_segno = dev_start_segno;
2892 end_segno = min(start_segno + range.segments, dev_end_segno);
2893
2894 while (start_segno < end_segno) {
2895 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2896 ret = -EBUSY;
2897 goto out;
2898 }
2899 sm->last_victim[GC_CB] = end_segno + 1;
2900 sm->last_victim[GC_GREEDY] = end_segno + 1;
2901 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2902
2903 gc_control.victim_segno = start_segno;
2904 ret = f2fs_gc(sbi, &gc_control);
2905 if (ret == -EAGAIN)
2906 ret = 0;
2907 else if (ret < 0)
2908 break;
2909 start_segno++;
2910 }
2911 out:
2912 mnt_drop_write_file(filp);
2913 return ret;
2914 }
2915
2916 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2917 {
2918 struct inode *inode = file_inode(filp);
2919 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2920
2921 /* Must be atomic_write supported */
2922 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2923
2924 return put_user(sb_feature, (u32 __user *)arg);
2925 }
2926
2927 #ifdef CONFIG_QUOTA
2928 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2929 {
2930 struct dquot *transfer_to[MAXQUOTAS] = {};
2931 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2932 struct super_block *sb = sbi->sb;
2933 int err = 0;
2934
2935 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
2936 if (!IS_ERR(transfer_to[PRJQUOTA])) {
2937 err = __dquot_transfer(inode, transfer_to);
2938 if (err)
2939 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2940 dqput(transfer_to[PRJQUOTA]);
2941 }
2942 return err;
2943 }
2944
2945 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2946 {
2947 struct f2fs_inode_info *fi = F2FS_I(inode);
2948 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2949 struct f2fs_inode *ri = NULL;
2950 kprojid_t kprojid;
2951 int err;
2952
2953 if (!f2fs_sb_has_project_quota(sbi)) {
2954 if (projid != F2FS_DEF_PROJID)
2955 return -EOPNOTSUPP;
2956 else
2957 return 0;
2958 }
2959
2960 if (!f2fs_has_extra_attr(inode))
2961 return -EOPNOTSUPP;
2962
2963 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
2964
2965 if (projid_eq(kprojid, fi->i_projid))
2966 return 0;
2967
2968 err = -EPERM;
2969
2970 if (IS_NOQUOTA(inode))
2971 return err;
2972
2973 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
2974 return -EOVERFLOW;
2975
2976 err = f2fs_dquot_initialize(inode);
2977 if (err)
2978 return err;
2979
2980 f2fs_lock_op(sbi);
2981 err = f2fs_transfer_project_quota(inode, kprojid);
2982 if (err)
2983 goto out_unlock;
2984
2985 fi->i_projid = kprojid;
2986 inode->i_ctime = current_time(inode);
2987 f2fs_mark_inode_dirty_sync(inode, true);
2988 out_unlock:
2989 f2fs_unlock_op(sbi);
2990 return err;
2991 }
2992 #else
2993 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2994 {
2995 return 0;
2996 }
2997
2998 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2999 {
3000 if (projid != F2FS_DEF_PROJID)
3001 return -EOPNOTSUPP;
3002 return 0;
3003 }
3004 #endif
3005
3006 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3007 {
3008 struct inode *inode = d_inode(dentry);
3009 struct f2fs_inode_info *fi = F2FS_I(inode);
3010 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3011
3012 if (IS_ENCRYPTED(inode))
3013 fsflags |= FS_ENCRYPT_FL;
3014 if (IS_VERITY(inode))
3015 fsflags |= FS_VERITY_FL;
3016 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3017 fsflags |= FS_INLINE_DATA_FL;
3018 if (is_inode_flag_set(inode, FI_PIN_FILE))
3019 fsflags |= FS_NOCOW_FL;
3020
3021 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3022
3023 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3024 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3025
3026 return 0;
3027 }
3028
3029 int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3030 struct dentry *dentry, struct fileattr *fa)
3031 {
3032 struct inode *inode = d_inode(dentry);
3033 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3034 u32 iflags;
3035 int err;
3036
3037 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3038 return -EIO;
3039 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3040 return -ENOSPC;
3041 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3042 return -EOPNOTSUPP;
3043 fsflags &= F2FS_SETTABLE_FS_FL;
3044 if (!fa->flags_valid)
3045 mask &= FS_COMMON_FL;
3046
3047 iflags = f2fs_fsflags_to_iflags(fsflags);
3048 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3049 return -EOPNOTSUPP;
3050
3051 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3052 if (!err)
3053 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3054
3055 return err;
3056 }
3057
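/*
 * Track GC failures on a pinned file; once gc_pin_file_threshold is exceeded,
 * unpin the file so GC can move its blocks again.
 */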
3058 int f2fs_pin_file_control(struct inode *inode, bool inc)
3059 {
3060 struct f2fs_inode_info *fi = F2FS_I(inode);
3061 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3062
3063 /* Use i_gc_failures for normal file as a risk signal. */
3064 if (inc)
3065 f2fs_i_gc_failures_write(inode,
3066 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3067
3068 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3069 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3070 __func__, inode->i_ino,
3071 fi->i_gc_failures[GC_FAILURE_PIN]);
3072 clear_inode_flag(inode, FI_PIN_FILE);
3073 return -EAGAIN;
3074 }
3075 return 0;
3076 }
3077
3078 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3079 {
3080 struct inode *inode = file_inode(filp);
3081 __u32 pin;
3082 int ret = 0;
3083
3084 if (get_user(pin, (__u32 __user *)arg))
3085 return -EFAULT;
3086
3087 if (!S_ISREG(inode->i_mode))
3088 return -EINVAL;
3089
3090 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3091 return -EROFS;
3092
3093 ret = mnt_want_write_file(filp);
3094 if (ret)
3095 return ret;
3096
3097 inode_lock(inode);
3098
3099 if (!pin) {
3100 clear_inode_flag(inode, FI_PIN_FILE);
3101 f2fs_i_gc_failures_write(inode, 0);
3102 goto done;
3103 }
3104
3105 if (f2fs_should_update_outplace(inode, NULL)) {
3106 ret = -EINVAL;
3107 goto out;
3108 }
3109
3110 if (f2fs_pin_file_control(inode, false)) {
3111 ret = -EAGAIN;
3112 goto out;
3113 }
3114
3115 ret = f2fs_convert_inline_inode(inode);
3116 if (ret)
3117 goto out;
3118
3119 if (!f2fs_disable_compressed_file(inode)) {
3120 ret = -EOPNOTSUPP;
3121 goto out;
3122 }
3123
3124 set_inode_flag(inode, FI_PIN_FILE);
3125 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3126 done:
3127 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3128 out:
3129 inode_unlock(inode);
3130 mnt_drop_write_file(filp);
3131 return ret;
3132 }
3133
3134 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3135 {
3136 struct inode *inode = file_inode(filp);
3137 __u32 pin = 0;
3138
3139 if (is_inode_flag_set(inode, FI_PIN_FILE))
3140 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3141 return put_user(pin, (u32 __user *)arg);
3142 }
3143
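/* Walk the whole file and populate the extent cache with its block mappings. */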
3144 int f2fs_precache_extents(struct inode *inode)
3145 {
3146 struct f2fs_inode_info *fi = F2FS_I(inode);
3147 struct f2fs_map_blocks map;
3148 pgoff_t m_next_extent;
3149 loff_t end;
3150 int err;
3151
3152 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3153 return -EOPNOTSUPP;
3154
3155 map.m_lblk = 0;
3156 map.m_next_pgofs = NULL;
3157 map.m_next_extent = &m_next_extent;
3158 map.m_seg_type = NO_CHECK_TYPE;
3159 map.m_may_create = false;
3160 end = max_file_blocks(inode);
3161
3162 while (map.m_lblk < end) {
3163 map.m_len = end - map.m_lblk;
3164
3165 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3166 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3167 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3168 if (err)
3169 return err;
3170
3171 map.m_lblk = m_next_extent;
3172 }
3173
3174 return 0;
3175 }
3176
3177 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3178 {
3179 return f2fs_precache_extents(file_inode(filp));
3180 }
3181
3182 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3183 {
3184 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3185 __u64 block_count;
3186
3187 if (!capable(CAP_SYS_ADMIN))
3188 return -EPERM;
3189
3190 if (f2fs_readonly(sbi->sb))
3191 return -EROFS;
3192
3193 if (copy_from_user(&block_count, (void __user *)arg,
3194 sizeof(block_count)))
3195 return -EFAULT;
3196
3197 return f2fs_resize_fs(sbi, block_count);
3198 }
3199
3200 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3201 {
3202 struct inode *inode = file_inode(filp);
3203
3204 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3205
3206 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3207 f2fs_warn(F2FS_I_SB(inode),
3208 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3209 inode->i_ino);
3210 return -EOPNOTSUPP;
3211 }
3212
3213 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3214 }
3215
3216 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3217 {
3218 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3219 return -EOPNOTSUPP;
3220
3221 return fsverity_ioctl_measure(filp, (void __user *)arg);
3222 }
3223
3224 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3225 {
3226 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3227 return -EOPNOTSUPP;
3228
3229 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3230 }
3231
3232 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3233 {
3234 struct inode *inode = file_inode(filp);
3235 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3236 char *vbuf;
3237 int count;
3238 int err = 0;
3239
3240 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3241 if (!vbuf)
3242 return -ENOMEM;
3243
3244 f2fs_down_read(&sbi->sb_lock);
3245 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3246 ARRAY_SIZE(sbi->raw_super->volume_name),
3247 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3248 f2fs_up_read(&sbi->sb_lock);
3249
3250 if (copy_to_user((char __user *)arg, vbuf,
3251 min(FSLABEL_MAX, count)))
3252 err = -EFAULT;
3253
3254 kfree(vbuf);
3255 return err;
3256 }
3257
3258 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3259 {
3260 struct inode *inode = file_inode(filp);
3261 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3262 char *vbuf;
3263 int err = 0;
3264
3265 if (!capable(CAP_SYS_ADMIN))
3266 return -EPERM;
3267
3268 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3269 if (IS_ERR(vbuf))
3270 return PTR_ERR(vbuf);
3271
3272 err = mnt_want_write_file(filp);
3273 if (err)
3274 goto out;
3275
3276 f2fs_down_write(&sbi->sb_lock);
3277
3278 memset(sbi->raw_super->volume_name, 0,
3279 sizeof(sbi->raw_super->volume_name));
3280 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3281 sbi->raw_super->volume_name,
3282 ARRAY_SIZE(sbi->raw_super->volume_name));
3283
3284 err = f2fs_commit_super(sbi, false);
3285
3286 f2fs_up_write(&sbi->sb_lock);
3287
3288 mnt_drop_write_file(filp);
3289 out:
3290 kfree(vbuf);
3291 return err;
3292 }
3293
3294 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3295 {
3296 struct inode *inode = file_inode(filp);
3297 __u64 blocks;
3298
3299 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3300 return -EOPNOTSUPP;
3301
3302 if (!f2fs_compressed_file(inode))
3303 return -EINVAL;
3304
3305 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3306 return put_user(blocks, (u64 __user *)arg);
3307 }
3308
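/*
 * For each compressed cluster covered by this dnode, release the reserved
 * NEW_ADDR slots back to the free block count and return how many blocks
 * were released.
 */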
3309 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3310 {
3311 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3312 unsigned int released_blocks = 0;
3313 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3314 block_t blkaddr;
3315 int i;
3316
3317 for (i = 0; i < count; i++) {
3318 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3319 dn->ofs_in_node + i);
3320
3321 if (!__is_valid_data_blkaddr(blkaddr))
3322 continue;
3323 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3324 DATA_GENERIC_ENHANCE)))
3325 return -EFSCORRUPTED;
3326 }
3327
3328 while (count) {
3329 int compr_blocks = 0;
3330
3331 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3332 blkaddr = f2fs_data_blkaddr(dn);
3333
3334 if (i == 0) {
3335 if (blkaddr == COMPRESS_ADDR)
3336 continue;
3337 dn->ofs_in_node += cluster_size;
3338 goto next;
3339 }
3340
3341 if (__is_valid_data_blkaddr(blkaddr))
3342 compr_blocks++;
3343
3344 if (blkaddr != NEW_ADDR)
3345 continue;
3346
3347 dn->data_blkaddr = NULL_ADDR;
3348 f2fs_set_data_blkaddr(dn);
3349 }
3350
3351 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3352 dec_valid_block_count(sbi, dn->inode,
3353 cluster_size - compr_blocks);
3354
3355 released_blocks += cluster_size - compr_blocks;
3356 next:
3357 count -= cluster_size;
3358 }
3359
3360 return released_blocks;
3361 }
3362
3363 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3364 {
3365 struct inode *inode = file_inode(filp);
3366 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3367 pgoff_t page_idx = 0, last_idx;
3368 unsigned int released_blocks = 0;
3369 int ret;
3370 int writecount;
3371
3372 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3373 return -EOPNOTSUPP;
3374
3375 if (!f2fs_compressed_file(inode))
3376 return -EINVAL;
3377
3378 if (f2fs_readonly(sbi->sb))
3379 return -EROFS;
3380
3381 ret = mnt_want_write_file(filp);
3382 if (ret)
3383 return ret;
3384
3385 f2fs_balance_fs(F2FS_I_SB(inode), true);
3386
3387 inode_lock(inode);
3388
3389 writecount = atomic_read(&inode->i_writecount);
3390 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3391 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3392 ret = -EBUSY;
3393 goto out;
3394 }
3395
3396 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3397 ret = -EINVAL;
3398 goto out;
3399 }
3400
3401 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3402 if (ret)
3403 goto out;
3404
3405 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3406 inode->i_ctime = current_time(inode);
3407 f2fs_mark_inode_dirty_sync(inode, true);
3408
3409 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3410 goto out;
3411
3412 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3413 filemap_invalidate_lock(inode->i_mapping);
3414
3415 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3416
3417 while (page_idx < last_idx) {
3418 struct dnode_of_data dn;
3419 pgoff_t end_offset, count;
3420
3421 set_new_dnode(&dn, inode, NULL, NULL, 0);
3422 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3423 if (ret) {
3424 if (ret == -ENOENT) {
3425 page_idx = f2fs_get_next_page_offset(&dn,
3426 page_idx);
3427 ret = 0;
3428 continue;
3429 }
3430 break;
3431 }
3432
3433 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3434 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3435 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3436
3437 ret = release_compress_blocks(&dn, count);
3438
3439 f2fs_put_dnode(&dn);
3440
3441 if (ret < 0)
3442 break;
3443
3444 page_idx += count;
3445 released_blocks += ret;
3446 }
3447
3448 filemap_invalidate_unlock(inode->i_mapping);
3449 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3450 out:
3451 inode_unlock(inode);
3452
3453 mnt_drop_write_file(filp);
3454
3455 if (ret >= 0) {
3456 ret = put_user(released_blocks, (u64 __user *)arg);
3457 } else if (released_blocks &&
3458 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3459 set_sbi_flag(sbi, SBI_NEED_FSCK);
3460 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3461 "iblocks=%llu, released=%u, compr_blocks=%u, "
3462 "run fsck to fix.",
3463 __func__, inode->i_ino, inode->i_blocks,
3464 released_blocks,
3465 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3466 }
3467
3468 return ret;
3469 }
3470
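/*
 * Re-reserve the space that release_compress_blocks() gave back: mark the
 * unused slots of each compressed cluster as NEW_ADDR again and return the
 * number of blocks reserved.
 */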
3471 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3472 {
3473 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3474 unsigned int reserved_blocks = 0;
3475 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3476 block_t blkaddr;
3477 int i;
3478
3479 for (i = 0; i < count; i++) {
3480 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3481 dn->ofs_in_node + i);
3482
3483 if (!__is_valid_data_blkaddr(blkaddr))
3484 continue;
3485 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3486 DATA_GENERIC_ENHANCE)))
3487 return -EFSCORRUPTED;
3488 }
3489
3490 while (count) {
3491 int compr_blocks = 0;
3492 blkcnt_t reserved;
3493 int ret;
3494
3495 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3496 blkaddr = f2fs_data_blkaddr(dn);
3497
3498 if (i == 0) {
3499 if (blkaddr == COMPRESS_ADDR)
3500 continue;
3501 dn->ofs_in_node += cluster_size;
3502 goto next;
3503 }
3504
3505 if (__is_valid_data_blkaddr(blkaddr)) {
3506 compr_blocks++;
3507 continue;
3508 }
3509
3510 dn->data_blkaddr = NEW_ADDR;
3511 f2fs_set_data_blkaddr(dn);
3512 }
3513
3514 reserved = cluster_size - compr_blocks;
3515 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3516 if (ret)
3517 return ret;
3518
3519 if (reserved != cluster_size - compr_blocks)
3520 return -ENOSPC;
3521
3522 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3523
3524 reserved_blocks += reserved;
3525 next:
3526 count -= cluster_size;
3527 }
3528
3529 return reserved_blocks;
3530 }
3531
3532 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3533 {
3534 struct inode *inode = file_inode(filp);
3535 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3536 pgoff_t page_idx = 0, last_idx;
3537 unsigned int reserved_blocks = 0;
3538 int ret;
3539
3540 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3541 return -EOPNOTSUPP;
3542
3543 if (!f2fs_compressed_file(inode))
3544 return -EINVAL;
3545
3546 if (f2fs_readonly(sbi->sb))
3547 return -EROFS;
3548
3549 ret = mnt_want_write_file(filp);
3550 if (ret)
3551 return ret;
3552
3553 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3554 goto out;
3555
3556 f2fs_balance_fs(F2FS_I_SB(inode), true);
3557
3558 inode_lock(inode);
3559
3560 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3561 ret = -EINVAL;
3562 goto unlock_inode;
3563 }
3564
3565 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3566 filemap_invalidate_lock(inode->i_mapping);
3567
3568 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3569
3570 while (page_idx < last_idx) {
3571 struct dnode_of_data dn;
3572 pgoff_t end_offset, count;
3573
3574 set_new_dnode(&dn, inode, NULL, NULL, 0);
3575 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3576 if (ret) {
3577 if (ret == -ENOENT) {
3578 page_idx = f2fs_get_next_page_offset(&dn,
3579 page_idx);
3580 ret = 0;
3581 continue;
3582 }
3583 break;
3584 }
3585
3586 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3587 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3588 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3589
3590 ret = reserve_compress_blocks(&dn, count);
3591
3592 f2fs_put_dnode(&dn);
3593
3594 if (ret < 0)
3595 break;
3596
3597 page_idx += count;
3598 reserved_blocks += ret;
3599 }
3600
3601 filemap_invalidate_unlock(inode->i_mapping);
3602 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3603
3604 if (ret >= 0) {
3605 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3606 inode->i_ctime = current_time(inode);
3607 f2fs_mark_inode_dirty_sync(inode, true);
3608 }
3609 unlock_inode:
3610 inode_unlock(inode);
3611 out:
3612 mnt_drop_write_file(filp);
3613
3614 if (ret >= 0) {
3615 ret = put_user(reserved_blocks, (u64 __user *)arg);
3616 } else if (reserved_blocks &&
3617 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3618 set_sbi_flag(sbi, SBI_NEED_FSCK);
3619 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3620 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3621 "run fsck to fix.",
3622 __func__, inode->i_ino, inode->i_blocks,
3623 reserved_blocks,
3624 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3625 }
3626
3627 return ret;
3628 }
3629
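/*
 * Secure-erase (or discard) and/or zero out one contiguous run of blocks on a
 * single block device, according to the F2FS_TRIM_FILE_* flags.
 */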
3630 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3631 pgoff_t off, block_t block, block_t len, u32 flags)
3632 {
3633 sector_t sector = SECTOR_FROM_BLOCK(block);
3634 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3635 int ret = 0;
3636
3637 if (flags & F2FS_TRIM_FILE_DISCARD) {
3638 if (bdev_max_secure_erase_sectors(bdev))
3639 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3640 GFP_NOFS);
3641 else
3642 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3643 GFP_NOFS);
3644 }
3645
3646 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3647 if (IS_ENCRYPTED(inode))
3648 ret = fscrypt_zeroout_range(inode, off, block, len);
3649 else
3650 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3651 GFP_NOFS, 0);
3652 }
3653
3654 return ret;
3655 }
3656
3657 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3658 {
3659 struct inode *inode = file_inode(filp);
3660 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3661 struct address_space *mapping = inode->i_mapping;
3662 struct block_device *prev_bdev = NULL;
3663 struct f2fs_sectrim_range range;
3664 pgoff_t index, pg_end, prev_index = 0;
3665 block_t prev_block = 0, len = 0;
3666 loff_t end_addr;
3667 bool to_end = false;
3668 int ret = 0;
3669
3670 if (!(filp->f_mode & FMODE_WRITE))
3671 return -EBADF;
3672
3673 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3674 sizeof(range)))
3675 return -EFAULT;
3676
3677 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3678 !S_ISREG(inode->i_mode))
3679 return -EINVAL;
3680
3681 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3682 !f2fs_hw_support_discard(sbi)) ||
3683 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3684 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3685 return -EOPNOTSUPP;
3686
3687 file_start_write(filp);
3688 inode_lock(inode);
3689
3690 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3691 range.start >= inode->i_size) {
3692 ret = -EINVAL;
3693 goto err;
3694 }
3695
3696 if (range.len == 0)
3697 goto err;
3698
3699 if (inode->i_size - range.start > range.len) {
3700 end_addr = range.start + range.len;
3701 } else {
3702 end_addr = range.len == (u64)-1 ?
3703 sbi->sb->s_maxbytes : inode->i_size;
3704 to_end = true;
3705 }
3706
3707 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3708 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3709 ret = -EINVAL;
3710 goto err;
3711 }
3712
3713 index = F2FS_BYTES_TO_BLK(range.start);
3714 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3715
3716 ret = f2fs_convert_inline_inode(inode);
3717 if (ret)
3718 goto err;
3719
3720 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3721 filemap_invalidate_lock(mapping);
3722
3723 ret = filemap_write_and_wait_range(mapping, range.start,
3724 to_end ? LLONG_MAX : end_addr - 1);
3725 if (ret)
3726 goto out;
3727
3728 truncate_inode_pages_range(mapping, range.start,
3729 to_end ? -1 : end_addr - 1);
3730
3731 while (index < pg_end) {
3732 struct dnode_of_data dn;
3733 pgoff_t end_offset, count;
3734 int i;
3735
3736 set_new_dnode(&dn, inode, NULL, NULL, 0);
3737 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3738 if (ret) {
3739 if (ret == -ENOENT) {
3740 index = f2fs_get_next_page_offset(&dn, index);
3741 continue;
3742 }
3743 goto out;
3744 }
3745
3746 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3747 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3748 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3749 struct block_device *cur_bdev;
3750 block_t blkaddr = f2fs_data_blkaddr(&dn);
3751
3752 if (!__is_valid_data_blkaddr(blkaddr))
3753 continue;
3754
3755 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3756 DATA_GENERIC_ENHANCE)) {
3757 ret = -EFSCORRUPTED;
3758 f2fs_put_dnode(&dn);
3759 goto out;
3760 }
3761
3762 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3763 if (f2fs_is_multi_device(sbi)) {
3764 int di = f2fs_target_device_index(sbi, blkaddr);
3765
3766 blkaddr -= FDEV(di).start_blk;
3767 }
3768
3769 if (len) {
3770 if (prev_bdev == cur_bdev &&
3771 index == prev_index + len &&
3772 blkaddr == prev_block + len) {
3773 len++;
3774 } else {
3775 ret = f2fs_secure_erase(prev_bdev,
3776 inode, prev_index, prev_block,
3777 len, range.flags);
3778 if (ret) {
3779 f2fs_put_dnode(&dn);
3780 goto out;
3781 }
3782
3783 len = 0;
3784 }
3785 }
3786
3787 if (!len) {
3788 prev_bdev = cur_bdev;
3789 prev_index = index;
3790 prev_block = blkaddr;
3791 len = 1;
3792 }
3793 }
3794
3795 f2fs_put_dnode(&dn);
3796
3797 if (fatal_signal_pending(current)) {
3798 ret = -EINTR;
3799 goto out;
3800 }
3801 cond_resched();
3802 }
3803
3804 if (len)
3805 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3806 prev_block, len, range.flags);
3807 out:
3808 filemap_invalidate_unlock(mapping);
3809 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3810 err:
3811 inode_unlock(inode);
3812 file_end_write(filp);
3813
3814 return ret;
3815 }
3816
3817 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3818 {
3819 struct inode *inode = file_inode(filp);
3820 struct f2fs_comp_option option;
3821
3822 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3823 return -EOPNOTSUPP;
3824
3825 inode_lock_shared(inode);
3826
3827 if (!f2fs_compressed_file(inode)) {
3828 inode_unlock_shared(inode);
3829 return -ENODATA;
3830 }
3831
3832 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3833 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3834
3835 inode_unlock_shared(inode);
3836
3837 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3838 sizeof(option)))
3839 return -EFAULT;
3840
3841 return 0;
3842 }
3843
3844 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3845 {
3846 struct inode *inode = file_inode(filp);
3847 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3848 struct f2fs_comp_option option;
3849 int ret = 0;
3850
3851 if (!f2fs_sb_has_compression(sbi))
3852 return -EOPNOTSUPP;
3853
3854 if (!(filp->f_mode & FMODE_WRITE))
3855 return -EBADF;
3856
3857 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3858 sizeof(option)))
3859 return -EFAULT;
3860
3861 if (!f2fs_compressed_file(inode) ||
3862 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3863 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3864 option.algorithm >= COMPRESS_MAX)
3865 return -EINVAL;
3866
3867 file_start_write(filp);
3868 inode_lock(inode);
3869
3870 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3871 ret = -EBUSY;
3872 goto out;
3873 }
3874
3875 if (inode->i_size != 0) {
3876 ret = -EFBIG;
3877 goto out;
3878 }
3879
3880 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3881 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3882 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3883 f2fs_mark_inode_dirty_sync(inode, true);
3884
3885 if (!f2fs_is_compress_backend_ready(inode))
3886 f2fs_warn(sbi, "compression algorithm is successfully set, "
3887 "but current kernel doesn't support this algorithm.");
3888 out:
3889 inode_unlock(inode);
3890 file_end_write(filp);
3891
3892 return ret;
3893 }
3894
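/*
 * Read the given pages into the page cache and mark them dirty so that the
 * next writeback rewrites them (re-compressing or decompressing the clusters).
 */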
3895 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3896 {
3897 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3898 struct address_space *mapping = inode->i_mapping;
3899 struct page *page;
3900 pgoff_t redirty_idx = page_idx;
3901 int i, page_len = 0, ret = 0;
3902
3903 page_cache_ra_unbounded(&ractl, len, 0);
3904
3905 for (i = 0; i < len; i++, page_idx++) {
3906 page = read_cache_page(mapping, page_idx, NULL, NULL);
3907 if (IS_ERR(page)) {
3908 ret = PTR_ERR(page);
3909 break;
3910 }
3911 page_len++;
3912 }
3913
3914 for (i = 0; i < page_len; i++, redirty_idx++) {
3915 page = find_lock_page(mapping, redirty_idx);
3916
3917 /* It will never fail, when page has pinned above */
3918 f2fs_bug_on(F2FS_I_SB(inode), !page);
3919
3920 set_page_dirty(page);
3921 f2fs_put_page(page, 1);
3922 f2fs_put_page(page, 0);
3923 }
3924
3925 return ret;
3926 }
3927
3928 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
3929 {
3930 struct inode *inode = file_inode(filp);
3931 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3932 struct f2fs_inode_info *fi = F2FS_I(inode);
3933 pgoff_t page_idx = 0, last_idx;
3934 unsigned int blk_per_seg = sbi->blocks_per_seg;
3935 int cluster_size = fi->i_cluster_size;
3936 int count, ret;
3937
3938 if (!f2fs_sb_has_compression(sbi) ||
3939 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
3940 return -EOPNOTSUPP;
3941
3942 if (!(filp->f_mode & FMODE_WRITE))
3943 return -EBADF;
3944
3945 if (!f2fs_compressed_file(inode))
3946 return -EINVAL;
3947
3948 f2fs_balance_fs(F2FS_I_SB(inode), true);
3949
3950 file_start_write(filp);
3951 inode_lock(inode);
3952
3953 if (!f2fs_is_compress_backend_ready(inode)) {
3954 ret = -EOPNOTSUPP;
3955 goto out;
3956 }
3957
3958 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3959 ret = -EINVAL;
3960 goto out;
3961 }
3962
3963 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3964 if (ret)
3965 goto out;
3966
3967 if (!atomic_read(&fi->i_compr_blocks))
3968 goto out;
3969
3970 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3971
3972 count = last_idx - page_idx;
3973 while (count) {
3974 int len = min(cluster_size, count);
3975
3976 ret = redirty_blocks(inode, page_idx, len);
3977 if (ret < 0)
3978 break;
3979
3980 if (get_dirty_pages(inode) >= blk_per_seg)
3981 filemap_fdatawrite(inode->i_mapping);
3982
3983 count -= len;
3984 page_idx += len;
3985 }
3986
3987 if (!ret)
3988 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
3989 LLONG_MAX);
3990
3991 if (ret)
3992 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
3993 __func__, ret);
3994 out:
3995 inode_unlock(inode);
3996 file_end_write(filp);
3997
3998 return ret;
3999 }
4000
4001 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4002 {
4003 struct inode *inode = file_inode(filp);
4004 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4005 pgoff_t page_idx = 0, last_idx;
4006 unsigned int blk_per_seg = sbi->blocks_per_seg;
4007 int cluster_size = F2FS_I(inode)->i_cluster_size;
4008 int count, ret;
4009
4010 if (!f2fs_sb_has_compression(sbi) ||
4011 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4012 return -EOPNOTSUPP;
4013
4014 if (!(filp->f_mode & FMODE_WRITE))
4015 return -EBADF;
4016
4017 if (!f2fs_compressed_file(inode))
4018 return -EINVAL;
4019
4020 f2fs_balance_fs(F2FS_I_SB(inode), true);
4021
4022 file_start_write(filp);
4023 inode_lock(inode);
4024
4025 if (!f2fs_is_compress_backend_ready(inode)) {
4026 ret = -EOPNOTSUPP;
4027 goto out;
4028 }
4029
4030 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4031 ret = -EINVAL;
4032 goto out;
4033 }
4034
4035 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4036 if (ret)
4037 goto out;
4038
4039 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4040
4041 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4042
4043 count = last_idx - page_idx;
4044 while (count) {
4045 int len = min(cluster_size, count);
4046
4047 ret = redirty_blocks(inode, page_idx, len);
4048 if (ret < 0)
4049 break;
4050
4051 if (get_dirty_pages(inode) >= blk_per_seg)
4052 filemap_fdatawrite(inode->i_mapping);
4053
4054 count -= len;
4055 page_idx += len;
4056 }
4057
4058 if (!ret)
4059 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4060 LLONG_MAX);
4061
4062 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4063
4064 if (ret)
4065 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4066 __func__, ret);
4067 out:
4068 inode_unlock(inode);
4069 file_end_write(filp);
4070
4071 return ret;
4072 }
4073
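/* Dispatch an f2fs ioctl command to its handler. */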
4074 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4075 {
4076 switch (cmd) {
4077 case FS_IOC_GETVERSION:
4078 return f2fs_ioc_getversion(filp, arg);
4079 case F2FS_IOC_START_ATOMIC_WRITE:
4080 return f2fs_ioc_start_atomic_write(filp);
4081 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4082 return f2fs_ioc_commit_atomic_write(filp);
4083 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4084 return f2fs_ioc_abort_atomic_write(filp);
4085 case F2FS_IOC_START_VOLATILE_WRITE:
4086 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4087 return -EOPNOTSUPP;
4088 case F2FS_IOC_SHUTDOWN:
4089 return f2fs_ioc_shutdown(filp, arg);
4090 case FITRIM:
4091 return f2fs_ioc_fitrim(filp, arg);
4092 case FS_IOC_SET_ENCRYPTION_POLICY:
4093 return f2fs_ioc_set_encryption_policy(filp, arg);
4094 case FS_IOC_GET_ENCRYPTION_POLICY:
4095 return f2fs_ioc_get_encryption_policy(filp, arg);
4096 case FS_IOC_GET_ENCRYPTION_PWSALT:
4097 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4098 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4099 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4100 case FS_IOC_ADD_ENCRYPTION_KEY:
4101 return f2fs_ioc_add_encryption_key(filp, arg);
4102 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4103 return f2fs_ioc_remove_encryption_key(filp, arg);
4104 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4105 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4106 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4107 return f2fs_ioc_get_encryption_key_status(filp, arg);
4108 case FS_IOC_GET_ENCRYPTION_NONCE:
4109 return f2fs_ioc_get_encryption_nonce(filp, arg);
4110 case F2FS_IOC_GARBAGE_COLLECT:
4111 return f2fs_ioc_gc(filp, arg);
4112 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4113 return f2fs_ioc_gc_range(filp, arg);
4114 case F2FS_IOC_WRITE_CHECKPOINT:
4115 return f2fs_ioc_write_checkpoint(filp, arg);
4116 case F2FS_IOC_DEFRAGMENT:
4117 return f2fs_ioc_defragment(filp, arg);
4118 case F2FS_IOC_MOVE_RANGE:
4119 return f2fs_ioc_move_range(filp, arg);
4120 case F2FS_IOC_FLUSH_DEVICE:
4121 return f2fs_ioc_flush_device(filp, arg);
4122 case F2FS_IOC_GET_FEATURES:
4123 return f2fs_ioc_get_features(filp, arg);
4124 case F2FS_IOC_GET_PIN_FILE:
4125 return f2fs_ioc_get_pin_file(filp, arg);
4126 case F2FS_IOC_SET_PIN_FILE:
4127 return f2fs_ioc_set_pin_file(filp, arg);
4128 case F2FS_IOC_PRECACHE_EXTENTS:
4129 return f2fs_ioc_precache_extents(filp, arg);
4130 case F2FS_IOC_RESIZE_FS:
4131 return f2fs_ioc_resize_fs(filp, arg);
4132 case FS_IOC_ENABLE_VERITY:
4133 return f2fs_ioc_enable_verity(filp, arg);
4134 case FS_IOC_MEASURE_VERITY:
4135 return f2fs_ioc_measure_verity(filp, arg);
4136 case FS_IOC_READ_VERITY_METADATA:
4137 return f2fs_ioc_read_verity_metadata(filp, arg);
4138 case FS_IOC_GETFSLABEL:
4139 return f2fs_ioc_getfslabel(filp, arg);
4140 case FS_IOC_SETFSLABEL:
4141 return f2fs_ioc_setfslabel(filp, arg);
4142 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4143 return f2fs_get_compress_blocks(filp, arg);
4144 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4145 return f2fs_release_compress_blocks(filp, arg);
4146 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4147 return f2fs_reserve_compress_blocks(filp, arg);
4148 case F2FS_IOC_SEC_TRIM_FILE:
4149 return f2fs_sec_trim_file(filp, arg);
4150 case F2FS_IOC_GET_COMPRESS_OPTION:
4151 return f2fs_ioc_get_compress_option(filp, arg);
4152 case F2FS_IOC_SET_COMPRESS_OPTION:
4153 return f2fs_ioc_set_compress_option(filp, arg);
4154 case F2FS_IOC_DECOMPRESS_FILE:
4155 return f2fs_ioc_decompress_file(filp, arg);
4156 case F2FS_IOC_COMPRESS_FILE:
4157 return f2fs_ioc_compress_file(filp, arg);
4158 default:
4159 return -ENOTTY;
4160 }
4161 }
4162
4163 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4164 {
4165 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4166 return -EIO;
4167 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4168 return -ENOSPC;
4169
4170 return __f2fs_ioctl(filp, cmd, arg);
4171 }
4172
4173 /*
4174  * Return true if the given read or write request should use direct I/O, or
4175  * false if it should use buffered I/O.
4176  */
4177 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4178 struct iov_iter *iter)
4179 {
4180 unsigned int align;
4181
4182 if (!(iocb->ki_flags & IOCB_DIRECT))
4183 return false;
4184
4185 if (f2fs_force_buffered_io(inode, iocb, iter))
4186 return false;
4187
4188 /*
4189  * Direct I/O not aligned to the disk's logical_block_size will be
4190  * attempted, but will fail with -EINVAL.
4191  *
4192  * f2fs additionally requires that direct I/O be aligned to the
4193  * filesystem block size, which is often a stricter requirement.
4194  * However, f2fs traditionally falls back to buffered I/O on requests
4195  * that are logical_block_size-aligned but not fs-block aligned.
4196  *
4197  * The below logic implements this behavior.
4198  */
4199 align = iocb->ki_pos | iov_iter_alignment(iter);
4200 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4201 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4202 return false;
4203
4204 return true;
4205 }
4206
4207 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4208 unsigned int flags)
4209 {
4210 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4211
4212 dec_page_count(sbi, F2FS_DIO_READ);
4213 if (error)
4214 return error;
4215 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size);
4216 return 0;
4217 }
4218
4219 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4220 .end_io = f2fs_dio_read_end_io,
4221 };
4222
4223 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4224 {
4225 struct file *file = iocb->ki_filp;
4226 struct inode *inode = file_inode(file);
4227 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4228 struct f2fs_inode_info *fi = F2FS_I(inode);
4229 const loff_t pos = iocb->ki_pos;
4230 const size_t count = iov_iter_count(to);
4231 struct iomap_dio *dio;
4232 ssize_t ret;
4233
4234 if (count == 0)
4235 return 0;
4236
4237 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4238
4239 if (iocb->ki_flags & IOCB_NOWAIT) {
4240 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4241 ret = -EAGAIN;
4242 goto out;
4243 }
4244 } else {
4245 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4246 }
4247
4248 /*
4249  * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4250  * the higher-level function iomap_dio_rw() in order to ensure that the
4251  * F2FS_DIO_READ counter will be decremented correctly in all cases.
4252  */
4253 inc_page_count(sbi, F2FS_DIO_READ);
4254 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4255 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4256 if (IS_ERR_OR_NULL(dio)) {
4257 ret = PTR_ERR_OR_ZERO(dio);
4258 if (ret != -EIOCBQUEUED)
4259 dec_page_count(sbi, F2FS_DIO_READ);
4260 } else {
4261 ret = iomap_dio_complete(dio);
4262 }
4263
4264 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4265
4266 file_accessed(file);
4267 out:
4268 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4269 return ret;
4270 }
4271
4272 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4273 {
4274 struct inode *inode = file_inode(iocb->ki_filp);
4275 const loff_t pos = iocb->ki_pos;
4276 ssize_t ret;
4277
4278 if (!f2fs_is_compress_backend_ready(inode))
4279 return -EOPNOTSUPP;
4280
4281 if (trace_f2fs_dataread_start_enabled()) {
4282 char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
4283 char *path;
4284
4285 if (!p)
4286 goto skip_read_trace;
4287
4288 path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
4289 if (IS_ERR(path)) {
4290 kfree(p);
4291 goto skip_read_trace;
4292 }
4293
4294 trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
4295 current->pid, path, current->comm);
4296 kfree(p);
4297 }
4298 skip_read_trace:
4299 if (f2fs_should_use_dio(inode, iocb, to)) {
4300 ret = f2fs_dio_read_iter(iocb, to);
4301 } else {
4302 ret = filemap_read(iocb, to, 0);
4303 if (ret > 0)
4304 f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
4305 }
4306 if (trace_f2fs_dataread_end_enabled())
4307 trace_f2fs_dataread_end(inode, pos, ret);
4308 return ret;
4309 }
4310
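/*
 * Common write-side checks: reject immutable or compression-released files,
 * run generic_write_checks(), and update timestamps via file_modified().
 */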
4311 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4312 {
4313 struct file *file = iocb->ki_filp;
4314 struct inode *inode = file_inode(file);
4315 ssize_t count;
4316 int err;
4317
4318 if (IS_IMMUTABLE(inode))
4319 return -EPERM;
4320
4321 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4322 return -EPERM;
4323
4324 count = generic_write_checks(iocb, from);
4325 if (count <= 0)
4326 return count;
4327
4328 err = file_modified(file);
4329 if (err)
4330 return err;
4331 return count;
4332 }
4333
/*
 * Preallocate blocks for a write request, if it is possible and helpful
 * to do so.  Returns a positive number of blocks that may have been
 * preallocated, 0 if none were preallocated, or a negative errno on a
 * hard failure.  Sets FI_PREALLOCATED_ALL on the inode when the whole
 * requested range was allocated.
 */
4341 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4342 bool dio)
4343 {
4344 struct inode *inode = file_inode(iocb->ki_filp);
4345 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4346 const loff_t pos = iocb->ki_pos;
4347 const size_t count = iov_iter_count(iter);
4348 struct f2fs_map_blocks map = {};
4349 int flag;
4350 int ret;
4351
/* In LFS mode a direct write is out-of-place, so don't bother preallocating. */
4353 if (dio && f2fs_lfs_mode(sbi))
4354 return 0;
4355
/*
 * For direct I/O, don't bother preallocating if the write starts below
 * the block-aligned end of file.
 */
4359 if (dio && i_size_read(inode) &&
4360 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4361 return 0;
4362
/* Block allocation may sleep, so skip preallocation for IOCB_NOWAIT. */
4364 if (iocb->ki_flags & IOCB_NOWAIT)
4365 return 0;
4366
/* If it will be a short write, don't bother. */
4368 if (fault_in_iov_iter_readable(iter, count))
4369 return 0;
4370
4371 if (f2fs_has_inline_data(inode)) {
/* If the data will fit inline, don't bother. */
4373 if (pos + count <= MAX_INLINE_DATA(inode))
4374 return 0;
4375 ret = f2fs_convert_inline_inode(inode);
4376 if (ret)
4377 return ret;
4378 }
4379
/* Preallocate only the blocks that the write fully covers. */
4381 map.m_lblk = F2FS_BLK_ALIGN(pos);
4382 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4383 if (map.m_len > map.m_lblk)
4384 map.m_len -= map.m_lblk;
4385 else
4386 map.m_len = 0;
4387 map.m_may_create = true;
4388 if (dio) {
4389 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4390 flag = F2FS_GET_BLOCK_PRE_DIO;
4391 } else {
4392 map.m_seg_type = NO_CHECK_TYPE;
4393 flag = F2FS_GET_BLOCK_PRE_AIO;
4394 }
4395
4396 ret = f2fs_map_blocks(inode, &map, 1, flag);
/* On -ENOSPC/-EDQUOT, still report the number of blocks that were allocated. */
4398 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4399 return ret;
4400 if (ret == 0)
4401 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4402 return map.m_len;
4403 }
4404
4405 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4406 struct iov_iter *from)
4407 {
4408 struct file *file = iocb->ki_filp;
4409 struct inode *inode = file_inode(file);
4410 ssize_t ret;
4411
4412 if (iocb->ki_flags & IOCB_NOWAIT)
4413 return -EOPNOTSUPP;
4414
4415 current->backing_dev_info = inode_to_bdi(inode);
4416 ret = generic_perform_write(iocb, from);
4417 current->backing_dev_info = NULL;
4418
4419 if (ret > 0) {
4420 iocb->ki_pos += ret;
4421 f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret);
4422 }
4423 return ret;
4424 }
4425
4426 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4427 unsigned int flags)
4428 {
4429 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4430
4431 dec_page_count(sbi, F2FS_DIO_WRITE);
4432 if (error)
4433 return error;
4434 f2fs_update_iostat(sbi, APP_DIRECT_IO, size);
4435 return 0;
4436 }
4437
4438 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4439 .end_io = f2fs_dio_write_end_io,
4440 };
4441
4442 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4443 bool *may_need_sync)
4444 {
4445 struct file *file = iocb->ki_filp;
4446 struct inode *inode = file_inode(file);
4447 struct f2fs_inode_info *fi = F2FS_I(inode);
4448 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4449 const bool do_opu = f2fs_lfs_mode(sbi);
4450 const loff_t pos = iocb->ki_pos;
4451 const ssize_t count = iov_iter_count(from);
4452 unsigned int dio_flags;
4453 struct iomap_dio *dio;
4454 ssize_t ret;
4455
4456 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4457
4458 if (iocb->ki_flags & IOCB_NOWAIT) {
/* Converting inline data or allocating new blocks could block. */
4460 if (f2fs_has_inline_data(inode) ||
4461 !f2fs_overwrite_io(inode, pos, count)) {
4462 ret = -EAGAIN;
4463 goto out;
4464 }
4465
4466 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4467 ret = -EAGAIN;
4468 goto out;
4469 }
4470 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4471 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4472 ret = -EAGAIN;
4473 goto out;
4474 }
4475 } else {
4476 ret = f2fs_convert_inline_inode(inode);
4477 if (ret)
4478 goto out;
4479
4480 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4481 if (do_opu)
4482 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4483 }
4484
/*
 * Count in-flight dio writes in F2FS_DIO_WRITE.  The count is normally
 * dropped by f2fs_dio_write_end_io(); it is only dropped below on errors
 * for which ->end_io() will never run (-EIOCBQUEUED means the dio was
 * queued and will complete later).
 */
4490 inc_page_count(sbi, F2FS_DIO_WRITE);
4491 dio_flags = 0;
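/*
 * If the write extends i_size, force synchronous dio completion so that
 * the new i_size can be committed below before returning to the caller.
 */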
4492 if (pos + count > inode->i_size)
4493 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4494 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4495 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4496 if (IS_ERR_OR_NULL(dio)) {
4497 ret = PTR_ERR_OR_ZERO(dio);
4498 if (ret == -ENOTBLK)
4499 ret = 0;
4500 if (ret != -EIOCBQUEUED)
4501 dec_page_count(sbi, F2FS_DIO_WRITE);
4502 } else {
4503 ret = iomap_dio_complete(dio);
4504 }
4505
4506 if (do_opu)
4507 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4508 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4509
4510 if (ret < 0)
4511 goto out;
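/*
 * Extend i_size if needed.  For in-place (non-OPU) writes, set
 * FI_UPDATE_WRITE to record that existing data blocks were overwritten.
 */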
4512 if (pos + ret > inode->i_size)
4513 f2fs_i_size_write(inode, pos + ret);
4514 if (!do_opu)
4515 set_inode_flag(inode, FI_UPDATE_WRITE);
4516
4517 if (iov_iter_count(from)) {
4518 ssize_t ret2;
4519 loff_t bufio_start_pos = iocb->ki_pos;
4520
/*
 * The direct write was partial: fall back to a buffered write for the
 * remainder of the request.  iocb->ki_pos has already been advanced past
 * the part that was written directly.
 */
4526 ret2 = f2fs_buffered_write_iter(iocb, from);
4527 if (iov_iter_count(from))
4528 f2fs_write_failed(inode, iocb->ki_pos);
4529 if (ret2 < 0)
4530 goto out;
4531
/*
 * Ensure that the pagecache pages are written to disk and invalidated
 * to preserve the expected O_DIRECT semantics.
 */
4536 if (ret2 > 0) {
4537 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4538
4539 ret += ret2;
4540
4541 ret2 = filemap_write_and_wait_range(file->f_mapping,
4542 bufio_start_pos,
4543 bufio_end_pos);
4544 if (ret2 < 0)
4545 goto out;
4546 invalidate_mapping_pages(file->f_mapping,
4547 bufio_start_pos >> PAGE_SHIFT,
4548 bufio_end_pos >> PAGE_SHIFT);
4549 }
4550 } else {
/* The iomap dio path already issued generic_write_sync() if needed. */
4552 *may_need_sync = false;
4553 }
4554 out:
4555 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4556 return ret;
4557 }
4558
4559 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4560 {
4561 struct inode *inode = file_inode(iocb->ki_filp);
4562 const loff_t orig_pos = iocb->ki_pos;
4563 const size_t orig_count = iov_iter_count(from);
4564 loff_t target_size;
4565 bool dio;
4566 bool may_need_sync = true;
4567 int preallocated;
4568 ssize_t ret;
4569
4570 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4571 ret = -EIO;
4572 goto out;
4573 }
4574
4575 if (!f2fs_is_compress_backend_ready(inode)) {
4576 ret = -EOPNOTSUPP;
4577 goto out;
4578 }
4579
4580 if (iocb->ki_flags & IOCB_NOWAIT) {
4581 if (!inode_trylock(inode)) {
4582 ret = -EAGAIN;
4583 goto out;
4584 }
4585 } else {
4586 inode_lock(inode);
4587 }
4588
4589 ret = f2fs_write_checks(iocb, from);
4590 if (ret <= 0)
4591 goto out_unlock;
4592
/* Determine whether we will do a direct write or a buffered write. */
4594 dio = f2fs_should_use_dio(inode, iocb, from);
4595
/* Possibly preallocate the blocks for the write. */
4597 target_size = iocb->ki_pos + iov_iter_count(from);
4598 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4599 if (preallocated < 0) {
4600 ret = preallocated;
4601 } else {
4602 if (trace_f2fs_datawrite_start_enabled()) {
4603 char *p = f2fs_kmalloc(F2FS_I_SB(inode),
4604 PATH_MAX, GFP_KERNEL);
4605 char *path;
4606
4607 if (!p)
4608 goto skip_write_trace;
4609 path = dentry_path_raw(file_dentry(iocb->ki_filp),
4610 p, PATH_MAX);
4611 if (IS_ERR(path)) {
4612 kfree(p);
4613 goto skip_write_trace;
4614 }
4615 trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
4616 current->pid, path, current->comm);
4617 kfree(p);
4618 }
4619 skip_write_trace:
/* Do the actual write. */
4621 ret = dio ?
4622 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4623 f2fs_buffered_write_iter(iocb, from);
4624
4625 if (trace_f2fs_datawrite_end_enabled())
4626 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4627 }
4628
/* Don't leave any preallocated blocks around past i_size. */
4630 if (preallocated && i_size_read(inode) < target_size) {
4631 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4632 filemap_invalidate_lock(inode->i_mapping);
4633 if (!f2fs_truncate(inode))
4634 file_dont_truncate(inode);
4635 filemap_invalidate_unlock(inode->i_mapping);
4636 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4637 } else {
4638 file_dont_truncate(inode);
4639 }
4640
4641 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4642 out_unlock:
4643 inode_unlock(inode);
4644 out:
4645 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4646 if (ret > 0 && may_need_sync)
4647 ret = generic_write_sync(iocb, ret);
4648 return ret;
4649 }
4650
4651 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4652 int advice)
4653 {
4654 struct address_space *mapping;
4655 struct backing_dev_info *bdi;
4656 struct inode *inode = file_inode(filp);
4657 int err;
4658
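/*
 * For sequential access hints, scale the readahead window by the
 * seq_file_ra_mul tunable instead of using generic_fadvise().
 */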
4659 if (advice == POSIX_FADV_SEQUENTIAL) {
4660 if (S_ISFIFO(inode->i_mode))
4661 return -ESPIPE;
4662
4663 mapping = filp->f_mapping;
4664 if (!mapping || len < 0)
4665 return -EINVAL;
4666
4667 bdi = inode_to_bdi(mapping->host);
4668 filp->f_ra.ra_pages = bdi->ra_pages *
4669 F2FS_I_SB(inode)->seq_file_ra_mul;
4670 spin_lock(&filp->f_lock);
4671 filp->f_mode &= ~FMODE_RANDOM;
4672 spin_unlock(&filp->f_lock);
4673 return 0;
4674 }
4675
4676 err = generic_fadvise(filp, offset, len, advice);
4677 if (!err && advice == POSIX_FADV_DONTNEED &&
4678 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4679 f2fs_compressed_file(inode))
4680 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4681
4682 return err;
4683 }
4684
4685 #ifdef CONFIG_COMPAT
4686 struct compat_f2fs_gc_range {
4687 u32 sync;
4688 compat_u64 start;
4689 compat_u64 len;
4690 };
4691 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4692 struct compat_f2fs_gc_range)
4693
4694 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4695 {
4696 struct compat_f2fs_gc_range __user *urange;
4697 struct f2fs_gc_range range;
4698 int err;
4699
4700 urange = compat_ptr(arg);
4701 err = get_user(range.sync, &urange->sync);
4702 err |= get_user(range.start, &urange->start);
4703 err |= get_user(range.len, &urange->len);
4704 if (err)
4705 return -EFAULT;
4706
4707 return __f2fs_ioc_gc_range(file, &range);
4708 }
4709
4710 struct compat_f2fs_move_range {
4711 u32 dst_fd;
4712 compat_u64 pos_in;
4713 compat_u64 pos_out;
4714 compat_u64 len;
4715 };
4716 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4717 struct compat_f2fs_move_range)
4718
4719 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4720 {
4721 struct compat_f2fs_move_range __user *urange;
4722 struct f2fs_move_range range;
4723 int err;
4724
4725 urange = compat_ptr(arg);
4726 err = get_user(range.dst_fd, &urange->dst_fd);
4727 err |= get_user(range.pos_in, &urange->pos_in);
4728 err |= get_user(range.pos_out, &urange->pos_out);
4729 err |= get_user(range.len, &urange->len);
4730 if (err)
4731 return -EFAULT;
4732
4733 return __f2fs_ioc_move_range(file, &range);
4734 }
4735
4736 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4737 {
4738 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4739 return -EIO;
4740 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4741 return -ENOSPC;
4742
4743 switch (cmd) {
4744 case FS_IOC32_GETVERSION:
4745 cmd = FS_IOC_GETVERSION;
4746 break;
4747 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4748 return f2fs_compat_ioc_gc_range(file, arg);
4749 case F2FS_IOC32_MOVE_RANGE:
4750 return f2fs_compat_ioc_move_range(file, arg);
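/*
 * The remaining commands use argument layouts that are identical for
 * 32-bit and 64-bit userspace, so forward them unchanged.
 */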
4751 case F2FS_IOC_START_ATOMIC_WRITE:
4752 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4753 case F2FS_IOC_START_VOLATILE_WRITE:
4754 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4755 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4756 case F2FS_IOC_SHUTDOWN:
4757 case FITRIM:
4758 case FS_IOC_SET_ENCRYPTION_POLICY:
4759 case FS_IOC_GET_ENCRYPTION_PWSALT:
4760 case FS_IOC_GET_ENCRYPTION_POLICY:
4761 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4762 case FS_IOC_ADD_ENCRYPTION_KEY:
4763 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4764 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4765 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4766 case FS_IOC_GET_ENCRYPTION_NONCE:
4767 case F2FS_IOC_GARBAGE_COLLECT:
4768 case F2FS_IOC_WRITE_CHECKPOINT:
4769 case F2FS_IOC_DEFRAGMENT:
4770 case F2FS_IOC_FLUSH_DEVICE:
4771 case F2FS_IOC_GET_FEATURES:
4772 case F2FS_IOC_GET_PIN_FILE:
4773 case F2FS_IOC_SET_PIN_FILE:
4774 case F2FS_IOC_PRECACHE_EXTENTS:
4775 case F2FS_IOC_RESIZE_FS:
4776 case FS_IOC_ENABLE_VERITY:
4777 case FS_IOC_MEASURE_VERITY:
4778 case FS_IOC_READ_VERITY_METADATA:
4779 case FS_IOC_GETFSLABEL:
4780 case FS_IOC_SETFSLABEL:
4781 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4782 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4783 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4784 case F2FS_IOC_SEC_TRIM_FILE:
4785 case F2FS_IOC_GET_COMPRESS_OPTION:
4786 case F2FS_IOC_SET_COMPRESS_OPTION:
4787 case F2FS_IOC_DECOMPRESS_FILE:
4788 case F2FS_IOC_COMPRESS_FILE:
4789 break;
4790 default:
4791 return -ENOIOCTLCMD;
4792 }
4793 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4794 }
4795 #endif
4796
4797 const struct file_operations f2fs_file_operations = {
4798 .llseek = f2fs_llseek,
4799 .read_iter = f2fs_file_read_iter,
4800 .write_iter = f2fs_file_write_iter,
4801 .open = f2fs_file_open,
4802 .release = f2fs_release_file,
4803 .mmap = f2fs_file_mmap,
4804 .flush = f2fs_file_flush,
4805 .fsync = f2fs_sync_file,
4806 .fallocate = f2fs_fallocate,
4807 .unlocked_ioctl = f2fs_ioctl,
4808 #ifdef CONFIG_COMPAT
4809 .compat_ioctl = f2fs_compat_ioctl,
4810 #endif
4811 .splice_read = generic_file_splice_read,
4812 .splice_write = iter_file_splice_write,
4813 .fadvise = f2fs_file_fadvise,
4814 };