// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()',
 * because it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * So, in 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> read_folio"). In case of readahead, @I_SYNC flag is
 * not set as well. However, UBIFS disables readahead.
 */

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

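/*
 * read_block - look up the data node of @inode covering @block in the TNC,
 * decrypt and decompress it into @addr, and zero-fill any tail beyond the
 * stored data length. Returns %-ENOENT (with @addr zeroed) for a hole.
 */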
static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
	return -EINVAL;
}

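/*
 * do_readpage - read all data blocks backing @page one-by-one, zero-filling
 * holes and any part of the page beyond @i_size, and mark the page
 * up-to-date. The page must be locked by the caller.
 */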
static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(c, !PageChecked(page));
	ubifs_assert(c, !PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		struct ubifs_info *c = inode->i_sb->s_fs_info;

		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};

	ubifs_release_budget(c, &req);
}

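/**
 * write_begin_slow - 'write_begin()' in slow path.
 * @mapping: address space mapping
 * @pos: position in the file to start writing from
 * @len: length of the write
 * @pagep: pointer to return the locked page
 *
 * This is the "slow path" of 'ubifs_write_begin()'. It is called when
 * fast-path budgeting fails, and does full budgeting (which may force
 * write-back) before locking the page, because budgeting with the page
 * locked could deadlock. Returns %0 on success and a negative error code on
 * failure.
 */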
static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages
	 * and deadlock if we had the page locked. At this point we do not
	 * know anything about the page, so assume that this is a new page
	 * which is written to a hole. This corresponds to largest budget.
	 * Later the budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the @PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media.
		 * This means that changing the page does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending.
 *
 * Returns: %0 in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist on
			 * the media. So changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new indexing
			 * information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - normal and
 * slow.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for this
 *       change; however, if the inode is already dirty, there is no need to
 *       budget for it;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired earlier, when the page was marked dirty.
 *
 * UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions and makes it
 * impossible to handle all of the above cases while the page is locked. This
 * is why the fast path below budgets with the @fast flag set and falls back
 * to 'write_begin_slow()' (with the page unlocked) when that fails.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int err, appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct page *page;

	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	page = grab_cache_page_write_begin(mapping, index);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* The page is not loaded from the flash */
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
			/*
			 * We change the whole page so no need to load it. But
			 * we do not know whether this page exists on the media
			 * or not, so we assume the latter because it requires
			 * larger budget. The assumption is that it is better
			 * to budget a bit more than to read the page from the
			 * media. Thus, we are setting the @PG_checked flag
			 * here.
			 */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(c, err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall back to the slow path.
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

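/*
 * ubifs_write_end - conclude a write started by 'ubifs_write_begin()': mark
 * the page dirty (attaching the budgeting information via @PG_private),
 * update @i_size when appending, and release @ui->ui_mutex if it was taken.
 */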
static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did not load
		 * it from the media (for optimization reasons). This means
		 * that part of the page contains garbage. So read the page
		 * now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * We do not set @I_DIRTY_PAGES (which means that the inode has
		 * dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns: %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (IS_ENCRYPTED(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen,
						    page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr,
					       &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for, are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes are going to be read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = pagecache_get_page(mapping, page_offset,
				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
				 ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB.
 *
 * Returns: %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}

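/*
 * ubifs_read_folio - '->read_folio()' implementation. Try a bulk-read first
 * and fall back to reading just this folio.
 */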
static int ubifs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;

	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	folio_unlock(folio);
	return 0;
}

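/*
 * do_writepage - write up to @len bytes of @page to the journal, one UBIFS
 * block at a time, then release the page budget and drop the page's private
 * state. The page is unlocked and write-back is ended here.
 */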
static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(c, PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	detach_page_private(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains current inode size,
 * and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' first is called with
 * the page locked, and it locks @ui_mutex. However, write-back does take inode
 * @i_mutex, which means other VFS operations may be run on this inode at the
 * same time. And the problematic one is truncation to smaller size, from where
 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
 * then drops the truncated pages, one by one, and locking the pages would
 * deadlock with 'ubifs_writepage()'. This is why 'ubifs_writepage()' performs
 * its own checks of the page index against @i_size and @synced_i_size (the
 * latter under @ui_lock) instead of relying on the caller.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(c, PagePrivate(page));

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would deal
			 * with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(c, PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

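/*
 * ubifs_setattr - '->setattr()' implementation. Truncations to a smaller
 * size are handled separately by 'do_truncation()'; all other attribute
 * changes (including truncation to a larger size) go through 'do_setattr()'.
 */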
int ubifs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

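/*
 * ubifs_invalidate_folio - release the budget of a dirty folio which is being
 * invalidated in full; partially invalidated folios stay dirty and keep
 * their budget.
 */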
static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, folio_test_private(folio));
	if (offset || length < folio_size(folio))
		/* Partial folio remains dirty */
		return;

	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 *
 * Returns: %1 if time update is needed, %0 if not.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec64 *now)
{
	if (!timespec64_equal(&inode->i_mtime, now) ||
	    !timespec64_equal(&inode->i_ctime, now))
		return 1;
	return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: timespec structure to hold the current time
 * @flags: time updating control flag determines updating
 *	    which time fields of @inode
 *
 * This function updates time of the inode.
 */
int ubifs_update_time(struct inode *inode, struct timespec64 *time,
		      int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int err, release;

	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		return generic_update_time(inode, time, flags);

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	release = ui->dirty;
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if it is not equivalent
 * to current time.
 *
 * Returns: %0 in case of success and a negative error code in case of
 * failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

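/*
 * ubifs_dirty_folio - '->dirty_folio()' implementation. UBIFS dirties folios
 * itself, together with budgeting, so any folio arriving here is expected to
 * be dirty already (i.e. 'filemap_dirty_folio()' returns false).
 */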
static bool ubifs_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	bool ret;
	struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

	ret = filemap_dirty_folio(mapping, folio);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(c, ret == false);
	return ret;
}

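/*
 * ubifs_release_folio - '->release_folio()' implementation. A private folio
 * in UBIFS is a dirty, budgeted folio, so the VM should never legitimately
 * ask to release one; the assertions below catch such attempts.
 */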
static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	/*
	 * An attempt to release a dirty page without budgeting for it - should
	 * not happen.
	 */
	if (folio_test_writeback(folio))
		return false;
	ubifs_assert(c, folio_test_private(folio));
	ubifs_assert(c, 0);
	folio_detach_private(folio);
	folio_clear_checked(folio);
	return true;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		goto sigbus;
	}

	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		attach_page_private(page, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;

sigbus:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.map_pages    = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;

	if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		file_accessed(file);

	return 0;
}

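/*
 * ubifs_get_link - '->get_link()' implementation. The symlink target is kept
 * in @ui->data; encrypted symlinks must be decoded through fscrypt, which
 * cannot be done in RCU walk mode (@dentry is NULL then).
 */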
static const char *ubifs_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (!IS_ENCRYPTED(inode))
		return ui->data;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(struct user_namespace *mnt_userns,
				 const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
{
	ubifs_getattr(mnt_userns, path, stat, request_mask, query_flags);

	if (IS_ENCRYPTED(d_inode(path->dentry)))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.read_folio       = ubifs_read_folio,
	.writepage        = ubifs_writepage,
	.write_begin      = ubifs_write_begin,
	.write_end        = ubifs_write_end,
	.invalidate_folio = ubifs_invalidate_folio,
	.dirty_folio      = ubifs_dirty_folio,
	.migrate_folio    = filemap_migrate_folio,
	.release_folio    = ubifs_release_folio,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr      = ubifs_setattr,
	.getattr      = ubifs_getattr,
	.listxattr    = ubifs_listxattr,
	.update_time  = ubifs_update_time,
	.fileattr_get = ubifs_fileattr_get,
	.fileattr_set = ubifs_fileattr_set,
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link    = ubifs_get_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_symlink_getattr,
	.listxattr   = ubifs_listxattr,
	.update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read_iter      = generic_file_read_iter,
	.write_iter     = ubifs_write_iter,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = generic_file_splice_read,
	.splice_write   = iter_file_splice_write,
	.open           = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};