0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include "hpfs_fn.h"
0011 #include <linux/mpage.h>
0012 #include <linux/iomap.h>
0013 #include <linux/fiemap.h>
0014
/* Number of 512-byte sectors needed to hold @size bytes (rounded up). */
#define BLOCKS(size) (((size) + 511) >> 9)
0016
0017 static int hpfs_file_release(struct inode *inode, struct file *file)
0018 {
0019 hpfs_lock(inode->i_sb);
0020 hpfs_write_if_changed(inode);
0021 hpfs_unlock(inode->i_sb);
0022 return 0;
0023 }
0024
0025 int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
0026 {
0027 struct inode *inode = file->f_mapping->host;
0028 int ret;
0029
0030 ret = file_write_and_wait_range(file, start, end);
0031 if (ret)
0032 return ret;
0033 return sync_blockdev(inode->i_sb->s_bdev);
0034 }
0035
0036
0037
0038
0039
0040
/*
 * Map the file-relative 512-byte sector @file_secno to its on-disk sector
 * number.  On success the disk sector is returned and *n_secs is set to
 * the number of contiguous sectors known to follow it.  Returns 0 when
 * the sector lies past the allocated size or the lookup fails.
 *
 * NOTE(review): callers in this file invoke this with the per-sb hpfs
 * lock held; presumably that protects the cached extent below — confirm.
 */
static secno hpfs_bmap(struct inode *inode, unsigned file_secno, unsigned *n_secs)
{
	struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
	unsigned n, disk_secno;
	struct fnode *fnode;
	struct buffer_head *bh;
	/* Nothing is mapped past the allocated size (mmu_private). */
	if (BLOCKS(hpfs_i(inode)->mmu_private) <= file_secno) return 0;
	/* Fast path: hit in the cached extent [i_file_sec, i_file_sec + i_n_secs). */
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	/* Slow path: walk the fnode's B+tree (bh is handed to the lookup). */
	if (!(fnode = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) return 0;
	disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh);
	if (disk_secno == -1) return 0;
	if (hpfs_chk_sectors(inode->i_sb, disk_secno, 1, "bmap")) return 0;
	/*
	 * The lookup may have refreshed the cached extent; re-check it so we
	 * can report a run longer than a single sector.
	 */
	n = file_secno - hpfs_inode->i_file_sec;
	if (n < hpfs_inode->i_n_secs) {
		*n_secs = hpfs_inode->i_n_secs - n;
		return hpfs_inode->i_disk_sec + n;
	}
	/* Cache still cold: report just the one sector we looked up. */
	*n_secs = 1;
	return disk_secno;
}
0065
/*
 * Shrink the on-disk allocation to match i->i_size: recompute the block
 * count, cut the B+tree back, and write the inode out.  No-op on
 * immutable inodes.  Caller must hold the per-sb hpfs lock (asserted).
 */
void hpfs_truncate(struct inode *i)
{
	if (IS_IMMUTABLE(i)) return ;
	hpfs_lock_assert(i->i_sb);

	/* Invalidate the cached extent before the tree changes under it. */
	hpfs_i(i)->i_n_secs = 0;
	/* NOTE(review): the "1 +" presumably accounts for the fnode sector — confirm. */
	i->i_blocks = 1 + ((i->i_size + 511) >> 9);
	hpfs_i(i)->mmu_private = i->i_size;
	hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
	hpfs_write_inode(i);
	/* Clear again in case the truncate/write repopulated the cache. */
	hpfs_i(i)->i_n_secs = 0;
}
0078
/*
 * get_block callback for the buffer-head address-space operations below.
 * Looks up — and, when @create is set, appends — the disk sector backing
 * @iblock and maps it into @bh_result, growing b_size to cover a
 * contiguous run when one is known.
 */
static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
	int r;
	secno s;
	unsigned n_secs;
	hpfs_lock(inode->i_sb);
	s = hpfs_bmap(inode, iblock, &n_secs);
	if (s) {
		/* Clamp the run to the size the caller asked for. */
		if (bh_result->b_size >> 9 < n_secs)
			n_secs = bh_result->b_size >> 9;
		/* Trim the run so it contains no hotfixed (remapped) sectors. */
		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
		if (unlikely(!n_secs)) {
			/* The first sector itself is hotfixed: map its replacement alone. */
			s = hpfs_search_hotfix_map(inode->i_sb, s);
			n_secs = 1;
		}
		map_bh(bh_result, inode->i_sb, s);
		bh_result->b_size = n_secs << 9;
		goto ret_0;
	}
	if (!create) goto ret_0;
	/*
	 * Allocation is append-only: a create anywhere but exactly at the
	 * current allocated size (mmu_private) is a caller bug.
	 */
	if (iblock<<9 != hpfs_i(inode)->mmu_private) {
		BUG();
		r = -EIO;
		goto ret_r;
	}
	if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
		/* Allocation failed: roll back any partial tree growth. */
		hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
		r = -ENOSPC;
		goto ret_r;
	}
	inode->i_blocks++;
	hpfs_i(inode)->mmu_private += 512;
	set_buffer_new(bh_result);
	/* Route the freshly allocated sector through the hotfix map too. */
	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
	ret_0:
	r = 0;
	ret_r:
	hpfs_unlock(inode->i_sb);
	return r;
}
0119
/*
 * iomap_begin for read-only iomap users (see hpfs_iomap_ops / fiemap).
 * Reports either one mapped extent starting at @offset — merged across
 * contiguous sectors — or a single-block hole.  Write and zero requests
 * are rejected; writing goes through the buffer-head path instead.
 */
static int hpfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct super_block *sb = inode->i_sb;
	unsigned int blkbits = inode->i_blkbits;
	unsigned int n_secs;
	secno s;

	if (WARN_ON_ONCE(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return -EINVAL;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = offset;

	hpfs_lock(sb);
	s = hpfs_bmap(inode, offset >> blkbits, &n_secs);
	if (s) {
		/* Shorten the run past hotfixed sectors and the requested length. */
		n_secs = hpfs_search_hotfix_map_for_range(sb, s,
				min_t(loff_t, n_secs, length));
		if (unlikely(!n_secs)) {
			/* First sector is hotfixed: report only its replacement. */
			s = hpfs_search_hotfix_map(sb, s);
			n_secs = 1;
		}
		iomap->type = IOMAP_MAPPED;
		iomap->flags = IOMAP_F_MERGED;
		iomap->addr = (u64)s << blkbits;
		iomap->length = (u64)n_secs << blkbits;
	} else {
		/* Unmapped: report a one-block hole at @offset. */
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->length = 1 << blkbits;
	}

	hpfs_unlock(sb);
	return 0;
}
0156
/* Read-only iomap ops; used by hpfs_fiemap() below. */
static const struct iomap_ops hpfs_iomap_ops = {
	.iomap_begin = hpfs_iomap_begin,
};
0160
/* ->read_folio: read one folio via mpage using hpfs_get_block. */
static int hpfs_read_folio(struct file *file, struct folio *folio)
{
	return mpage_read_folio(folio, hpfs_get_block);
}
0165
/* ->writepage: write a single page through the buffer-head path. */
static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hpfs_get_block, wbc);
}
0170
/* ->readahead: batch read-ahead via mpage using hpfs_get_block. */
static void hpfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, hpfs_get_block);
}
0175
/* ->writepages: batch writeback via mpage using hpfs_get_block. */
static int hpfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hpfs_get_block);
}
0181
0182 static void hpfs_write_failed(struct address_space *mapping, loff_t to)
0183 {
0184 struct inode *inode = mapping->host;
0185
0186 hpfs_lock(inode->i_sb);
0187
0188 if (to > inode->i_size) {
0189 truncate_pagecache(inode, inode->i_size);
0190 hpfs_truncate(inode);
0191 }
0192
0193 hpfs_unlock(inode->i_sb);
0194 }
0195
/*
 * ->write_begin: cont_write_begin() zero-fills the gap between the
 * current allocated size (mmu_private) and @pos, allocating sectors
 * sequentially through hpfs_get_block.  On failure, trim back anything
 * that was already extended.
 */
static int hpfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
				hpfs_get_block,
				&hpfs_i(mapping->host)->mmu_private);
	if (unlikely(ret))
		hpfs_write_failed(mapping, pos + len);

	return ret;
}
0211
/*
 * ->write_end: finish the write through generic_write_end(), which
 * returns the number of bytes actually committed.  A short result means
 * the tail was not written — shrink the file back.  On success, flag the
 * in-memory inode dirty under the hpfs lock so release/fsync flush it.
 */
static int hpfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *pagep, void *fsdata)
{
	struct inode *inode = mapping->host;
	int err;
	err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
	/*
	 * NOTE(review): err is int and len unsigned, so this compare is done
	 * unsigned; it would not fire for a negative err.  It appears to rely
	 * on generic_write_end() never returning a negative value — confirm.
	 */
	if (err < len)
		hpfs_write_failed(mapping, pos + len);
	if (!(err < 0)) {
		/* Some bytes made it: mark the inode as needing writeback. */
		hpfs_lock(inode->i_sb);
		hpfs_i(inode)->i_dirty = 1;
		hpfs_unlock(inode->i_sb);
	}
	return err;
}
0229
/* ->bmap (FIBMAP ioctl): translate a file block to a device block. */
static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hpfs_get_block);
}
0234
0235 static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len)
0236 {
0237 int ret;
0238
0239 inode_lock(inode);
0240 len = min_t(u64, len, i_size_read(inode));
0241 ret = iomap_fiemap(inode, fieinfo, start, len, &hpfs_iomap_ops);
0242 inode_unlock(inode);
0243
0244 return ret;
0245 }
0246
/* Buffer-head based address-space operations (see hpfs_get_block). */
const struct address_space_operations hpfs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = hpfs_read_folio,
	.writepage = hpfs_writepage,
	.readahead = hpfs_readahead,
	.writepages = hpfs_writepages,
	.write_begin = hpfs_write_begin,
	.write_end = hpfs_write_end,
	.bmap = _hpfs_bmap
};
0258
/* File operations for regular HPFS files — mostly generic VFS helpers. */
const struct file_operations hpfs_file_ops =
{
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.release = hpfs_file_release,
	.fsync = hpfs_file_fsync,
	.splice_read = generic_file_splice_read,
	.unlocked_ioctl = hpfs_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
0271
/* Inode operations for regular HPFS files. */
const struct inode_operations hpfs_file_iops =
{
	.setattr = hpfs_setattr,
	.fiemap = hpfs_fiemap,
};