0001
0002
0003
0004
0005
0006
0007 #include <linux/module.h>
0008 #include <linux/fs.h>
0009 #include <linux/buffer_head.h>
0010 #include <linux/mpage.h>
0011 #include "omfs.h"
0012
0013 static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
0014 {
0015 return (sbi->s_sys_blocksize - offset -
0016 sizeof(struct omfs_extent)) /
0017 sizeof(struct omfs_extent_entry) + 1;
0018 }
0019
0020 void omfs_make_empty_table(struct buffer_head *bh, int offset)
0021 {
0022 struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];
0023
0024 oe->e_next = ~cpu_to_be64(0ULL);
0025 oe->e_extent_count = cpu_to_be32(1),
0026 oe->e_fill = cpu_to_be32(0x22),
0027 oe->e_entry.e_cluster = ~cpu_to_be64(0ULL);
0028 oe->e_entry.e_blocks = ~cpu_to_be64(0ULL);
0029 }
0030
/*
 * Free every cluster referenced by @inode's extent-table chain and reset
 * each table to empty.  Only full truncation is supported: the caller
 * must have set i_size to 0 first, otherwise -EIO is returned.
 * Continuation blocks themselves are also released.
 */
int omfs_shrink_inode(struct inode *inode)
{
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct buffer_head *bh;
	u64 next, last;
	u32 extent_count;
	u32 max_extents;
	int ret;

	/* the inode's own block holds the first extent table */
	next = inode->i_ino;

	/* only truncate-to-zero is supported */
	ret = -EIO;
	if (inode->i_size != 0)
		goto out;

	bh = omfs_bread(inode->i_sb, next);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);

		/* reject on-disk counts that would overrun the block */
		if (extent_count > max_extents)
			goto out_brelse;

		last = next;
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* free each real extent; the last entry is the terminator */
		for (; extent_count > 1; extent_count--) {
			u64 start, count;
			start = be64_to_cpu(entry->e_cluster);
			count = be64_to_cpu(entry->e_blocks);

			omfs_clear_range(inode->i_sb, start, (int) count);
			entry++;
		}
		omfs_make_empty_table(bh, (char *) oe - bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);

		/* continuation blocks (not the inode block) are freed too */
		if (last != inode->i_ino)
			omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

		/* an all-ones e_next terminates the chain */
		if (next == ~0)
			break;

		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	ret = 0;
out:
	return ret;
out_brelse:
	brelse(bh);
	return ret;
}
0105
/*
 * Drop all blocks backing @inode (only truncate-to-zero frees anything,
 * see omfs_shrink_inode()) and schedule the inode for writeback.
 */
static void omfs_truncate(struct inode *inode)
{
	omfs_shrink_inode(inode);
	mark_inode_dirty(inode);
}
0111
0112
0113
0114
0115
/*
 * Extend the file by allocating more space through extent table @oe.
 *
 * First tries to grow the table's final real extent by a single
 * adjacent block; if that block is taken, appends a new extent of up to
 * s_clustersize blocks.  On success *@ret_block holds the first newly
 * allocated block.  Returns 0 or a negative errno.
 */
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
		u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* corrupt table: it must contain at least the terminator entry */
	if (extent_count < 1)
		return -EIO;

	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		/* try to extend the last real extent by one block */
		entry = terminator-1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			be64_add_cpu(&entry->e_blocks, 1);
			/* the terminator's e_blocks is stored bit-inverted;
			 * bump the inverted running total by one as well */
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	/* NOTE(review): uses OMFS_EXTENT_START sizing even for continuation
	 * tables; continuation blocks are not chained here (no room -> -EIO) */
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	if (be32_to_cpu(oe->e_extent_count) > max_count-1)
		return -EIO;

	/* grab up to a cluster's worth of fresh blocks */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* move the terminator down one slot; new extent takes its place */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	/* keep the terminator's inverted block total in sync */
	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	be32_add_cpu(&oe->e_extent_count, 1);

out:
	*ret_block = new_block;
out_fail:
	return ret;
}
0186
0187
0188
0189
0190
0191 static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
0192 sector_t block, int count, int *left)
0193 {
0194
0195 sector_t searched = 0;
0196 for (; count > 1; count--) {
0197 int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
0198 be64_to_cpu(ent->e_blocks));
0199
0200 if (block >= searched &&
0201 block < searched + numblocks) {
0202
0203
0204
0205
0206 *left = numblocks - (block - searched);
0207 return clus_to_blk(OMFS_SB(inode->i_sb),
0208 be64_to_cpu(ent->e_cluster)) +
0209 block - searched;
0210 }
0211 searched += numblocks;
0212 ent++;
0213 }
0214 return 0;
0215 }
0216
/*
 * Map logical @block of @inode to a disk block for the block layer.
 *
 * Walks the chain of extent tables starting in the inode's own block.
 * On a miss with @create set, allocates space in the last table via
 * omfs_grow_extent().  Contiguous multi-block mappings are reported
 * back through bh_result->b_size.
 */
static int omfs_get_block(struct inode *inode, sector_t block,
			struct buffer_head *bh_result, int create)
{
	struct buffer_head *bh;
	sector_t next, offset;
	int ret;
	u64 new_block;
	u32 max_extents;
	int extent_count;
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	int max_blocks = bh_result->b_size >> inode->i_blkbits;
	int remain;

	ret = -EIO;
	bh = omfs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
	next = inode->i_ino;

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* corrupt table: count cannot exceed block capacity */
		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			/* cap the contiguous run at the caller's b_size */
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		/* an all-ones e_next terminates the chain */
		if (next == ~0)
			break;

		brelse(bh);
		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	if (create) {
		/* bh still holds the last table in the chain here */
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}
out_brelse:
	brelse(bh);
out:
	return ret;
}
0286
/* ->read_folio: fill one folio using the generic buffer-head path. */
static int omfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, omfs_get_block);
}
0291
/* ->readahead: batch reads through the generic mpage machinery. */
static void omfs_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, omfs_get_block);
}
0296
/* ->writepage: write one page using the generic buffer-head path. */
static int omfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, omfs_get_block, wbc);
}
0301
/* ->writepages: batch writeback through the generic mpage machinery. */
static int
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, omfs_get_block);
}
0307
0308 static void omfs_write_failed(struct address_space *mapping, loff_t to)
0309 {
0310 struct inode *inode = mapping->host;
0311
0312 if (to > inode->i_size) {
0313 truncate_pagecache(inode, inode->i_size);
0314 omfs_truncate(inode);
0315 }
0316 }
0317
0318 static int omfs_write_begin(struct file *file, struct address_space *mapping,
0319 loff_t pos, unsigned len,
0320 struct page **pagep, void **fsdata)
0321 {
0322 int ret;
0323
0324 ret = block_write_begin(mapping, pos, len, pagep, omfs_get_block);
0325 if (unlikely(ret))
0326 omfs_write_failed(mapping, pos + len);
0327
0328 return ret;
0329 }
0330
/* ->bmap: legacy logical-to-physical block lookup via the generic helper. */
static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, omfs_get_block);
}
0335
/* Regular-file operations: everything is handled by generic VFS helpers. */
const struct file_operations omfs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.fsync = generic_file_fsync,
	.splice_read = generic_file_splice_read,
};
0344
0345 static int omfs_setattr(struct user_namespace *mnt_userns,
0346 struct dentry *dentry, struct iattr *attr)
0347 {
0348 struct inode *inode = d_inode(dentry);
0349 int error;
0350
0351 error = setattr_prepare(&init_user_ns, dentry, attr);
0352 if (error)
0353 return error;
0354
0355 if ((attr->ia_valid & ATTR_SIZE) &&
0356 attr->ia_size != i_size_read(inode)) {
0357 error = inode_newsize_ok(inode, attr->ia_size);
0358 if (error)
0359 return error;
0360 truncate_setsize(inode, attr->ia_size);
0361 omfs_truncate(inode);
0362 }
0363
0364 setattr_copy(&init_user_ns, inode, attr);
0365 mark_inode_dirty(inode);
0366 return 0;
0367 }
0368
/* Regular-file inode operations: only attribute changes are non-generic. */
const struct inode_operations omfs_file_inops = {
	.setattr = omfs_setattr,
};
0372
/* Address-space operations wiring omfs_get_block into the generic paths. */
const struct address_space_operations omfs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = omfs_read_folio,
	.readahead = omfs_readahead,
	.writepage = omfs_writepage,
	.writepages = omfs_writepages,
	.write_begin = omfs_write_begin,
	.write_end = generic_write_end,
	.bmap = omfs_bmap,
};
0384