0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/fs.h>
0011 #include <linux/mm.h>
0012 #include <linux/writeback.h>
0013 #include "nilfs.h"
0014 #include "segment.h"
0015
0016 int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
0017 {
0018
0019
0020
0021
0022
0023
0024
0025
0026 struct the_nilfs *nilfs;
0027 struct inode *inode = file->f_mapping->host;
0028 int err = 0;
0029
0030 if (nilfs_inode_dirty(inode)) {
0031 if (datasync)
0032 err = nilfs_construct_dsync_segment(inode->i_sb, inode,
0033 start, end);
0034 else
0035 err = nilfs_construct_segment(inode->i_sb);
0036 }
0037
0038 nilfs = inode->i_sb->s_fs_info;
0039 if (!err)
0040 err = nilfs_flush_device(nilfs);
0041
0042 return err;
0043 }
0044
/*
 * nilfs_page_mkwrite - make an mmapped page writable
 * @vmf: fault descriptor for the write fault
 *
 * Called when a shared-mmap page transitions from read-only to written.
 * Ensures every block under the page is allocated on disk (filling holes
 * inside a transaction if needed) and marks the file dirty so the
 * segment constructor will pick it up.  Returns a VM_FAULT_* code via
 * block_page_mkwrite_return().
 */
static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct nilfs_transaction_info ti;
	int ret = 0;

	/* Refuse new writable pages when the filesystem is nearly full */
	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	lock_page(page);
	/*
	 * Bail out if the page was truncated/invalidated, lies beyond
	 * EOF, or is not up to date — the fault must be retried.
	 */
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		ret = -EFAULT;
		goto out;
	}

	/*
	 * Check to see if the page is already mapped to disk (i.e. it
	 * contains no holes); if so, no block allocation is needed.
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		/* Scan the page's buffers for any unmapped (hole) block */
		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			/* Cache the result so the scan is skipped next time */
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	/*
	 * Fill hole blocks: allocation must happen inside a nilfs
	 * transaction so it is logged consistently.
	 */
	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	/* may fail, e.g. with -ENOSPC */
	if (unlikely(ret))
		goto out;

	file_update_time(vma->vm_file);
	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret) {
		nilfs_transaction_abort(inode->i_sb);
		goto out;
	}
	/* Mark the whole page's worth of blocks dirty on the inode */
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

 mapped:
	/* Let any in-flight writeback of this page finish first */
	wait_for_stable_page(page);
 out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
0113
/*
 * VM operations for nilfs files: generic read faults, with a custom
 * page_mkwrite hook to allocate blocks and dirty the inode on writes.
 */
static const struct vm_operations_struct nilfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= nilfs_page_mkwrite,
};
0119
0120 static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
0121 {
0122 file_accessed(file);
0123 vma->vm_ops = &nilfs_file_vm_ops;
0124 return 0;
0125 }
0126
0127
0128
0129
0130
/*
 * File operations for nilfs regular files.  Generic VFS helpers are
 * used for read/write/seek/splice; ioctl, mmap and fsync are backed by
 * nilfs-specific implementations.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	/* .release is intentionally unset; nothing to do on last close */
	.fsync		= nilfs_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
0146
/*
 * Inode operations for nilfs regular files: attribute changes,
 * permission checks, extent mapping (fiemap) and file attribute flags.
 */
const struct inode_operations nilfs_file_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
	.fiemap		= nilfs_fiemap,
	.fileattr_get	= nilfs_fileattr_get,
	.fileattr_set	= nilfs_fileattr_set,
};
0154
0155