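// SPDX-License-Identifier: GPL-2.0-or-later
/* Handling of writes to regular files and writing back to the server.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */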
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include "internal.h"

static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
			       loff_t i_size, bool caching);

#ifdef CONFIG_AFS_FSCACHE
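/*
 * Mark a page as having been made dirty and thus needing writeback.  We also
 * need to pin the cache object to write back to.
 */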
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	return fscache_dirty_folio(mapping, folio,
				   afs_vnode_cache(AFS_FS_I(mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
	if (caching)
		folio_start_fscache(folio);
}
#else
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
}
#endif
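
/*
 * Prepare to perform part of a write to a page.
 */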
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len,
		    struct page **_page, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct folio *folio;
	unsigned long priv;
	unsigned f, from;
	unsigned t, to;
	pgoff_t index;
	int ret;

	_enter("{%llx:%llu},%llx,%x",
	       vnode->fid.vid, vnode->fid.vnode, pos, len);
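
	/* Prefetch the area to be written into the cache if we're caching
	 * this file.  We need to do this before we get a lock on the page in
	 * case there's more than one writer competing for the same cache
	 * block.
	 */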
	ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	index = folio_index(folio);
	from = pos - index * PAGE_SIZE;
	to = from + len;

try_again:
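	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */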
	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		ASSERTCMP(f, <=, t);

		if (folio_test_writeback(folio)) {
			trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
			goto flush_conflicting_write;
		}
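
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */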
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
	}

	*_page = folio_file_page(folio, pos / PAGE_SIZE);
	_leave(" = 0");
	return 0;
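
	/* The previous write and this write aren't adjacent or overlapping,
	 * so flush the page out.
	 */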
flush_conflicting_write:
	_debug("flush conflict");
	ret = folio_write_one(folio);
	if (ret < 0)
		goto error;

	ret = folio_lock_killable(folio);
	if (ret < 0)
		goto error;
	goto try_again;

error:
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
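
/*
 * Finalise part of a write to a page.
 */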
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *subpage, void *fsdata)
{
	struct folio *folio = page_folio(subpage);
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	unsigned long priv;
	unsigned int f, from = offset_in_folio(folio, pos);
	unsigned int t, to = from + copied;
	loff_t i_size, write_end_pos;

	_enter("{%llx:%llu},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	if (!folio_test_uptodate(folio)) {
		if (copied < len) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	if (copied == 0)
		goto out;

	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->netfs.inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->netfs.inode);
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}

	if (folio_test_private(folio)) {
		priv = (unsigned long)folio_get_private(folio);
		f = afs_folio_dirty_from(folio, priv);
		t = afs_folio_dirty_to(folio, priv);
		if (from < f)
			f = from;
		if (to > t)
			t = to;
		priv = afs_folio_dirty(folio, f, t);
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
	} else {
		priv = afs_folio_dirty(folio, from, to);
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
	}

	if (folio_mark_dirty(folio))
		_debug("dirtied %lx", folio_index(folio));

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}
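
/*
 * Kill all the pages in the given range.
 */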
static void afs_kill_pages(struct address_space *mapping,
			   loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("kill %lx (to %lx)", index, last);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = folio_next_index(folio);

		folio_clear_uptodate(folio);
		folio_end_writeback(folio);
		folio_lock(folio);
		generic_error_remove_page(mapping, &folio->page);
		folio_unlock(folio);
		folio_put(folio);

	} while (index = next, index <= last);

	_leave("");
}
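
/*
 * Redirty all the pages in a given range.
 */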
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      loff_t start, loff_t len)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct folio *folio;
	pgoff_t index = start / PAGE_SIZE;
	pgoff_t last = (start + len - 1) / PAGE_SIZE, next;

	_enter("{%llx:%llu},%llx @%llx",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	do {
		_debug("redirty %llx @%llx", len, start);

		folio = filemap_get_folio(mapping, index);
		if (!folio) {
			next = index + 1;
			continue;
		}

		next = index + folio_nr_pages(folio);
		folio_redirty_for_writepage(wbc, folio);
		folio_end_writeback(folio);
		folio_put(folio);
	} while (index = next, index <= last);

	_leave("");
}
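
/*
 * Completion of write to server: clear the dirty tracking and the writeback
 * flag on each folio covered by the store operation.
 */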
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	struct address_space *mapping = vnode->netfs.inode.i_mapping;
	struct folio *folio;
	pgoff_t end;

	XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);

	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	rcu_read_lock();

	end = (start + len - 1) / PAGE_SIZE;
	xas_for_each(&xas, folio, end) {
		if (!folio_test_writeback(folio)) {
			kdebug("bad %x @%llx page %lx %lx",
			       len, start, folio_index(folio), end);
			ASSERT(folio_test_writeback(folio));
		}

		trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
		folio_detach_private(folio);
		folio_end_writeback(folio);
	}

	rcu_read_unlock();

	afs_prune_wb_keys(vnode);
	_leave("");
}
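
/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */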
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (op->error == 0) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
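
/*
 * Write to a file.
 */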
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.write_iter = iter;
	op->store.pos = pos;
	op->store.size = size;
	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
	op->store.laundering = laundering;
	op->mtime = vnode->netfs.inode.i_mtime;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);

	switch (op->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", op->error);
	return afs_put_operation(op);
}
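
/*
 * Extend the region to be written back to include subsequent contiguously
 * dirty pages if possible, but don't sleep while doing so.
 *
 * If this page holds new content, then we can include filler zeros in the
 * writeback.
 */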
static void afs_extend_writeback(struct address_space *mapping,
				 struct afs_vnode *vnode,
				 long *_count,
				 loff_t start,
				 loff_t max_len,
				 bool new_content,
				 bool caching,
				 unsigned int *_len)
{
	struct pagevec pvec;
	struct folio *folio;
	unsigned long priv;
	unsigned int psize, filler = 0;
	unsigned int f, t;
	loff_t len = *_len;
	pgoff_t index = (start + len) / PAGE_SIZE;
	bool stop = true;
	unsigned int i;

	XA_STATE(xas, &mapping->i_pages, index);
	pagevec_init(&pvec);

	do {
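		/* Firstly, we gather up a batch of contiguous dirty pages
		 * under the RCU read lock - but we can't clear the dirty flags
		 * there if any of those pages are mapped.
		 */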
		rcu_read_lock();

		xas_for_each(&xas, folio, ULONG_MAX) {
			stop = true;
			if (xas_retry(&xas, folio))
				continue;
			if (xa_is_value(folio))
				break;
			if (folio_index(folio) != index)
				break;

			if (!folio_try_get_rcu(folio)) {
				xas_reset(&xas);
				continue;
			}
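
			/* Has the folio moved or been split? */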
			if (unlikely(folio != xas_reload(&xas))) {
				folio_put(folio);
				break;
			}

			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			if (!folio_test_dirty(folio) ||
			    folio_test_writeback(folio) ||
			    folio_test_fscache(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			psize = folio_size(folio);
			priv = (unsigned long)folio_get_private(folio);
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
			if (f != 0 && !new_content) {
				folio_unlock(folio);
				folio_put(folio);
				break;
			}

			len += filler + t;
			filler = psize - t;
			if (len >= max_len || *_count <= 0)
				stop = true;
			else if (t == psize || new_content)
				stop = false;

			index += folio_nr_pages(folio);
			if (!pagevec_add(&pvec, &folio->page))
				break;
			if (stop)
				break;
		}

		if (!stop)
			xas_pause(&xas);
		rcu_read_unlock();
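
		/* Now, if we obtained any folios, we can shift them to being
		 * writable and mark them for caching.
		 */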
		if (!pagevec_count(&pvec))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			folio = page_folio(pvec.pages[i]);
			trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);

			if (!folio_clear_dirty_for_io(folio))
				BUG();
			if (folio_start_writeback(folio))
				BUG();
			afs_folio_start_fscache(caching, folio);

			*_count -= folio_nr_pages(folio);
			folio_unlock(folio);
		}

		pagevec_release(&pvec);
		cond_resched();
	} while (!stop);

	*_len = len;
}
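
/*
 * Synchronously write back the locked page and any subsequent non-locked
 * dirty pages.
 */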
static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
						struct writeback_control *wbc,
						struct folio *folio,
						loff_t start, loff_t end)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct iov_iter iter;
	unsigned long priv;
	unsigned int offset, to, len, max_len;
	loff_t i_size = i_size_read(&vnode->netfs.inode);
	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
	long count = wbc->nr_to_write;
	int ret;

	_enter(",%lx,%llx-%llx", folio_index(folio), start, end);

	if (folio_start_writeback(folio))
		BUG();
	afs_folio_start_fscache(caching, folio);

	count -= folio_nr_pages(folio);
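
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */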
	priv = (unsigned long)folio_get_private(folio);
	offset = afs_folio_dirty_from(folio, priv);
	to = afs_folio_dirty_to(folio, priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);

	len = to - offset;
	start += offset;
	if (start < i_size) {
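		/* Trim the write to the EOF; the extra data is ignored.  Also
		 * put an upper limit on the size of a single storedata op.
		 */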
		max_len = 65536 * 4096;
		max_len = min_t(unsigned long long, max_len, end - start + 1);
		max_len = min_t(unsigned long long, max_len, i_size - start);

		if (len < max_len &&
		    (to == folio_size(folio) || new_content))
			afs_extend_writeback(mapping, vnode, &count,
					     start, max_len, new_content,
					     caching, &len);
		len = min_t(loff_t, len, max_len);
	}
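
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */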
	folio_unlock(folio);

	if (start < i_size) {
		_debug("write back %x @%llx [%llx]", len, start, i_size);
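
		/* Speculatively write to the cache.  We have to fix this up
		 * later if the store fails.
		 */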
		afs_write_to_cache(vnode, start, len, i_size, caching);

		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
		ret = afs_store_data(vnode, &iter, start, false);
	} else {
		_debug("write discard %x @%llx [%llx]", len, start, i_size);
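
		/* The dirty region was entirely beyond the EOF. */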
		fscache_clear_page_bits(mapping, start, len, caching);
		afs_pages_written_back(vnode, start, len);
		ret = 0;
	}

	switch (ret) {
	case 0:
		wbc->nr_to_write = count;
		ret = len;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		fallthrough;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
	case -ENETRESET:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, start, len);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
		afs_kill_pages(mapping, start, len);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
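
/*
 * Write a page back to the server.
 * - the caller locked the page for us
 */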
int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(subpage);
	ssize_t ret;
	loff_t start;

	_enter("{%lx},", folio_index(folio));

#ifdef CONFIG_AFS_FSCACHE
	folio_wait_fscache(folio);
#endif

	start = folio_index(folio) * PAGE_SIZE;
	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
					       folio, start, LLONG_MAX - start);
	if (ret < 0) {
		_leave(" = %zd", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}
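
/*
 * Write a region of pages back to the server.
 */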
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 loff_t start, loff_t end, loff_t *_next)
{
	struct folio *folio;
	struct page *head_page;
	ssize_t ret;
	int n, skips = 0;

	_enter("%llx,%llx,", start, end);

	do {
		pgoff_t index = start / PAGE_SIZE;

		n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
					     PAGECACHE_TAG_DIRTY, 1, &head_page);
		if (!n)
			break;

		folio = page_folio(head_page);
		start = folio_pos(folio);

		_debug("wback %lx", folio_index(folio));
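
		/* At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping.
		 */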
		if (wbc->sync_mode != WB_SYNC_NONE) {
			ret = folio_lock_killable(folio);
			if (ret < 0) {
				folio_put(folio);
				return ret;
			}
		} else {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return 0;
			}
		}

		if (folio_mapping(folio) != mapping ||
		    !folio_test_dirty(folio)) {
			start += folio_size(folio);
			folio_unlock(folio);
			folio_put(folio);
			continue;
		}

		if (folio_test_writeback(folio) ||
		    folio_test_fscache(folio)) {
			folio_unlock(folio);
			if (wbc->sync_mode != WB_SYNC_NONE) {
				folio_wait_writeback(folio);
#ifdef CONFIG_AFS_FSCACHE
				folio_wait_fscache(folio);
#endif
			} else {
				start += folio_size(folio);
			}
			folio_put(folio);
			if (wbc->sync_mode == WB_SYNC_NONE) {
				if (skips >= 5 || need_resched())
					break;
				skips++;
			}
			continue;
		}

		if (!folio_clear_dirty_for_io(folio))
			BUG();
		ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
		folio_put(folio);
		if (ret < 0) {
			_leave(" = %zd", ret);
			return ret;
		}

		start += ret;

		cond_resched();
	} while (wbc->nr_to_write > 0);

	*_next = start;
	_leave(" = 0 [%llx]", *_next);
	return 0;
}
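
/*
 * Write some of the pending data back to the server.
 */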
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	loff_t start, next;
	int ret;

	_enter("");
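
	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */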
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index * PAGE_SIZE;
		ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
		if (ret == 0) {
			mapping->writeback_index = next / PAGE_SIZE;
			if (start > 0 && wbc->nr_to_write > 0) {
				ret = afs_writepages_region(mapping, wbc, 0,
							    start, &next);
				if (ret == 0)
					mapping->writeback_index =
						next / PAGE_SIZE;
			}
		}
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
		if (wbc->nr_to_write > 0 && ret == 0)
			mapping->writeback_index = next / PAGE_SIZE;
	} else {
		ret = afs_writepages_region(mapping, wbc,
					    wbc->range_start, wbc->range_end, &next);
	}

	up_read(&vnode->validate_lock);
	_leave(" = %d", ret);
	return ret;
}
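
/*
 * Write to an AFS file.
 */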
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%llx:%llu},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->netfs.inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = afs_validate(vnode, af->key);
	if (result < 0)
		return result;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}
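
/*
 * Flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */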
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}
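
/*
 * Notification that a previously read-only page is about to become writable.
 * - if it returns an error, the caller will deliver a bus error signal
 */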
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	unsigned long priv;
	vm_fault_t ret = VM_FAULT_RETRY;

	_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));

	afs_validate(vnode, af->key);

	sb_start_pagefault(inode->i_sb);
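
	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */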
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio) &&
	    folio_wait_fscache_killable(folio) < 0)
		goto out;
#endif

	if (folio_wait_writeback_killable(folio))
		goto out;

	if (folio_lock_killable(folio) < 0)
		goto out;
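
	/* We mustn't change folio->private until writeback is complete as
	 * that details the portion of the page we need to write back and we
	 * might need to redirty the page if there's a problem.
	 */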
	if (folio_wait_writeback_killable(folio) < 0) {
		folio_unlock(folio);
		goto out;
	}

	priv = afs_folio_dirty(folio, 0, folio_size(folio));
	priv = afs_folio_dirty_mmapped(priv);
	if (folio_test_private(folio)) {
		folio_change_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
	} else {
		folio_attach_private(folio, (void *)priv);
		trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
	}
	file_update_time(file);

	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
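
/*
 * Prune the keys cached for writeback.
 */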
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;
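
	/* Discard keys that are no longer in use */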
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
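
/*
 * Clean up a page during invalidation.
 */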
int afs_launder_folio(struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	struct iov_iter iter;
	struct bio_vec bv[1];
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", folio->index);

	priv = (unsigned long)folio_get_private(folio);
	if (folio_clear_dirty_for_io(folio)) {
		f = 0;
		t = folio_size(folio);
		if (folio_test_private(folio)) {
			f = afs_folio_dirty_from(folio, priv);
			t = afs_folio_dirty_to(folio, priv);
		}

		bv[0].bv_page = &folio->page;
		bv[0].bv_offset = f;
		bv[0].bv_len = t - f;
		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

		trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
		ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
	}

	trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
	folio_detach_private(folio);
	folio_wait_fscache(folio);
	return ret;
}
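
/*
 * Deal with the completion of writing the data to the cache.
 */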
static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				    bool was_async)
{
	struct afs_vnode *vnode = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		afs_invalidate_cache(vnode, 0);
}
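
/*
 * Save the write to the cache also.
 */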
static void afs_write_to_cache(struct afs_vnode *vnode,
			       loff_t start, size_t len, loff_t i_size,
			       bool caching)
{
	fscache_write_to_cache(afs_vnode_cache(vnode),
			       vnode->netfs.inode.i_mapping, start, len, i_size,
			       afs_write_to_cache_done, vnode, caching);
}