// SPDX-License-Identifier: GPL-2.0-or-later
/* kiocb-using read/write
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
0008 #include <linux/mount.h>
0009 #include <linux/slab.h>
0010 #include <linux/file.h>
0011 #include <linux/uio.h>
0012 #include <linux/falloc.h>
0013 #include <linux/sched/mm.h>
0014 #include <trace/events/fscache.h>
0015 #include "internal.h"
0016
/*
 * Per-request context wrapped around a kiocb for an in-flight read from or
 * write to the cache file.  Refcounted (ki_refcnt starts at 2) so that the
 * submission path and the async completion path can each drop a ref safely.
 */
struct cachefiles_kiocb {
	struct kiocb		iocb;		/* Must be first for container_of() in completions */
	refcount_t		ki_refcnt;	/* Refs held by submitter and completion */
	loff_t			start;		/* Starting file position (write path) */
	union {
		size_t		skipped;	/* Bytes zero-filled before issuing the read */
		size_t		len;		/* Length of the write */
	};
	struct cachefiles_object *object;	/* Cache object being read/written */
	netfs_io_terminated_t	term_func;	/* Optional caller completion callback */
	void			*term_func_priv; /* Opaque data for term_func */
	bool			was_async;	/* True if completion ran asynchronously */
	unsigned int		inval_counter;	/* Copy of cookie->inval_counter at submission */
	u64			b_writing;	/* Cache blocks accounted in cache->b_writing */
};
0032
/*
 * Drop a ref on an I/O context.  On the final put, release the object and
 * file refs that were taken at submission time and free the context.
 */
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
	if (refcount_dec_and_test(&ki->ki_refcnt)) {
		cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
		fput(ki->iocb.ki_filp);
		kfree(ki);
	}
}
0041
0042
0043
0044
/*
 * Handle completion of a read from the cache.  Runs either from the AIO
 * completion path or synchronously from cachefiles_read() on non-queued
 * returns.
 */
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	if (ret < 0)
		trace_cachefiles_io_error(ki->object, inode, ret,
					  cachefiles_trace_read_error);

	if (ki->term_func) {
		if (ret >= 0) {
			/* If the cache object got invalidated whilst the read
			 * was in flight, the data read is stale - tell the
			 * caller with ESTALE; otherwise report the total
			 * amount transferred (zero-filled skip + actual read).
			 */
			if (ki->object->cookie->inval_counter == ki->inval_counter)
				ki->skipped += ret;
			else
				ret = -ESTALE;
		}

		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	}

	cachefiles_put_kiocb(ki);
}
0069
0070
0071
0072
/*
 * Initiate a read from the cache.
 *
 * If read_hole says so, we first seek for data: any leading hole is filled
 * with zeros in the output iterator before the DIO read is issued, and a
 * completely absent region either fails with -ENODATA or is fully
 * zero-filled, depending on the hole mode.  term_func, if given, is invoked
 * on completion (possibly asynchronously).
 */
static int cachefiles_read(struct netfs_cache_resources *cres,
			   loff_t start_pos,
			   struct iov_iter *iter,
			   enum netfs_read_from_hole read_hole,
			   netfs_io_terminated_t term_func,
			   void *term_func_priv)
{
	struct cachefiles_object *object;
	struct cachefiles_kiocb *ki;
	struct file *file;
	unsigned int old_nofs;
	ssize_t ret = -ENOBUFS;
	size_t len = iov_iter_count(iter), skipped = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		goto presubmission_error;

	fscache_count_read();
	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	/* If the caller asked us to seek for data before doing the read, then
	 * we should do that now.  If we find a gap, we fill it with zeros.
	 */
	if (read_hole != NETFS_READ_HOLE_IGNORE) {
		loff_t off = start_pos, off2;

		off2 = cachefiles_inject_read_error();
		if (off2 == 0)
			off2 = vfs_llseek(file, off, SEEK_DATA);
		if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
			skipped = 0;
			ret = off2;
			goto presubmission_error;
		}

		if (off2 == -ENXIO || off2 >= start_pos + len) {
			/* The region is beyond the EOF or there's no more data
			 * in the region, so clear the rest of the buffer and
			 * return success - unless the caller wants a hole to
			 * be treated as failure.
			 */
			ret = -ENODATA;
			if (read_hole == NETFS_READ_HOLE_FAIL)
				goto presubmission_error;

			iov_iter_zero(len, iter);
			skipped = len;
			ret = 0;
			goto presubmission_error;
		}

		skipped = off2 - off;
		iov_iter_zero(skipped, iter);
	}

	ret = -ENOMEM;
	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki)
		goto presubmission_error;

	/* One ref for the completion handler, one for us to drop below. */
	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos + skipped;
	ki->iocb.ki_flags	= IOCB_DIRECT;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->skipped		= skipped;
	ki->object		= object;
	ki->inval_counter	= cres->inval_counter;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_read_complete;

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
	old_nofs = memalloc_nofs_save();	/* Avoid fs recursion during the I/O */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		ki->was_async = false;
		cachefiles_read_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;

presubmission_error:
	/* Tell the caller directly if we never got as far as submitting. */
	if (term_func)
		term_func(term_func_priv, ret < 0 ? ret : skipped, false);
	return ret;
}
0192
0193
0194
0195
0196
/*
 * Query the occupancy of the cache in a region, returning where the next
 * chunk of data starts and how long it is.  Only whole granules (at least
 * one cache block in size) are reported; partial granules at either end are
 * rounded away.  Returns -ENODATA if there's no (whole-granule) data in the
 * region and -ENOBUFS on seek failure.
 */
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
				      loff_t start, size_t len, size_t granularity,
				      loff_t *_data_start, size_t *_data_len)
{
	struct cachefiles_object *object;
	struct file *file;
	loff_t off, off2;

	*_data_start = -1;
	*_data_len = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		return -ENOBUFS;

	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);
	granularity = max_t(size_t, object->volume->cache->bsize, granularity);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start, len,
	       i_size_read(file_inode(file)));

	/* Find where the next run of data begins. */
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off < 0 && off >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */
	if (round_up(off, granularity) >= start + len)
		return -ENODATA; /* No data in range */

	/* Find where that run of data ends. */
	off2 = cachefiles_inject_read_error();
	if (off2 == 0)
		off2 = vfs_llseek(file, off, SEEK_HOLE);
	if (off2 == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */

	/* Round away partial blocks */
	off = round_up(off, granularity);
	off2 = round_down(off2, granularity);
	if (off2 <= off)
		return -ENODATA;

	*_data_start = off;
	if (off2 > start + len)
		*_data_len = len;
	else
		*_data_len = off2 - off;
	return 0;
}
0250
0251
0252
0253
/*
 * Handle completion of a write to the cache.  Releases the sb freeze
 * protection that __cachefiles_write() took on behalf of this completion and
 * undoes the block-write accounting.
 */
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct cachefiles_object *object = ki->object;
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	/* Tell lockdep we inherited freeze protection from the submission
	 * thread (which released it to us via __sb_writers_release()), then
	 * actually end the write.
	 */
	__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	__sb_end_write(inode->i_sb, SB_FREEZE_WRITE);

	if (ret < 0)
		trace_cachefiles_io_error(object, inode, ret,
					  cachefiles_trace_write_error);

	atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
	set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
	if (ki->term_func)
		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	cachefiles_put_kiocb(ki);
}
0276
0277
0278
0279
/*
 * Initiate a write to the cache file.  The write is accounted in the cache's
 * b_writing counter (in cache blocks) until completion, and term_func, if
 * given, is invoked when the write completes.
 */
int __cachefiles_write(struct cachefiles_object *object,
		       struct file *file,
		       loff_t start_pos,
		       struct iov_iter *iter,
		       netfs_io_terminated_t term_func,
		       void *term_func_priv)
{
	struct cachefiles_cache *cache;
	struct cachefiles_kiocb *ki;
	struct inode *inode;
	unsigned int old_nofs;
	ssize_t ret;
	size_t len = iov_iter_count(iter);

	fscache_count_write();
	cache = object->volume->cache;

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki) {
		if (term_func)
			term_func(term_func_priv, -ENOMEM, false);
		return -ENOMEM;
	}

	/* One ref for the completion handler, one for us to drop below. */
	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos;
	ki->iocb.ki_flags	= IOCB_DIRECT | IOCB_WRITE;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->object		= object;
	ki->start		= start_pos;
	ki->len			= len;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;
	ki->b_writing		= (len + (1 << cache->bshift) - 1) >> cache->bshift;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_write_complete;
	atomic_long_add(ki->b_writing, &cache->b_writing);

	/* Open-code file_start_write here to grab freeze protection, which
	 * will be released by another thread in cachefiles_write_complete().
	 * Fool lockdep by telling it the lock got released so that it doesn't
	 * complain about the held lock when we return to userspace.
	 */
	inode = file_inode(file);
	__sb_start_write(inode->i_sb, SB_FREEZE_WRITE);
	__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_write(object, inode, ki->iocb.ki_pos, len);
	old_nofs = memalloc_nofs_save();	/* Avoid fs recursion during the I/O */
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		ki->was_async = false;
		cachefiles_write_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;
}
0369
0370 static int cachefiles_write(struct netfs_cache_resources *cres,
0371 loff_t start_pos,
0372 struct iov_iter *iter,
0373 netfs_io_terminated_t term_func,
0374 void *term_func_priv)
0375 {
0376 if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
0377 if (term_func)
0378 term_func(term_func_priv, -ENOBUFS, false);
0379 return -ENOBUFS;
0380 }
0381
0382 return __cachefiles_write(cachefiles_cres_object(cres),
0383 cachefiles_cres_file(cres),
0384 start_pos, iter,
0385 term_func, term_func_priv);
0386 }
0387
0388
0389
0390
0391
/*
 * Prepare a read operation, shortening it to a cached/uncached boundary as
 * appropriate.  Decides whether the subrequest should be satisfied from the
 * cache, downloaded from the server (and possibly copied to the cache), or
 * filled with zeros.
 */
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
						    loff_t i_size)
{
	enum cachefiles_prepare_read_trace why;
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	const struct cred *saved_cred;
	struct file *file = cachefiles_cres_file(cres);
	enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
	loff_t off, to;
	ino_t ino = file ? file_inode(file)->i_ino : 0;
	int rc;

	_enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size);

	if (subreq->start >= i_size) {
		ret = NETFS_FILL_WITH_ZEROES;
		why = cachefiles_trace_read_after_eof;
		goto out_no_object;
	}

	/* If the cookie is marked empty, download and mark for copy-to-cache;
	 * in ondemand mode we still fall through to try triggering a read.
	 */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
		__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		why = cachefiles_trace_read_no_data;
		if (!test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags))
			goto out_no_object;
	}

	/* The object and the file may be being created in the background. */
	if (!file) {
		why = cachefiles_trace_read_no_file;
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
			goto out_no_object;
		file = cachefiles_cres_file(cres);
		if (!file)
			goto out_no_object;
		ino = file_inode(file)->i_ino;
	}

	object = cachefiles_cres_object(cres);
	cache = object->volume->cache;
	cachefiles_begin_secure(cache, &saved_cred);
retry:
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, subreq->start, SEEK_DATA);
	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
		if (off == (loff_t)-ENXIO) {
			why = cachefiles_trace_read_seek_nxio;
			goto download_and_store;
		}
		trace_cachefiles_io_error(object, file_inode(file), off,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (off >= subreq->start + subreq->len) {
		why = cachefiles_trace_read_found_hole;
		goto download_and_store;
	}

	if (off > subreq->start) {
		/* Data begins partway through the subrequest: shorten it to
		 * the uncached leading part (rounded up to a cache block).
		 */
		off = round_up(off, cache->bsize);
		subreq->len = off - subreq->start;
		why = cachefiles_trace_read_found_part;
		goto download_and_store;
	}

	to = cachefiles_inject_read_error();
	if (to == 0)
		to = vfs_llseek(file, subreq->start, SEEK_HOLE);
	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), to,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (to < subreq->start + subreq->len) {
		/* Shorten to the cached run; round up at EOF so the final
		 * partial block is still read from the cache.
		 */
		if (subreq->start + subreq->len >= i_size)
			to = round_up(to, cache->bsize);
		else
			to = round_down(to, cache->bsize);
		subreq->len = to - subreq->start;
	}

	why = cachefiles_trace_read_have_data;
	ret = NETFS_READ_FROM_CACHE;
	goto out;

download_and_store:
	__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
	if (test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) {
		/* In ondemand mode, ask userspace to fetch the data into the
		 * cache, then re-probe the cache file.
		 */
		rc = cachefiles_ondemand_read(object, subreq->start,
					      subreq->len);
		if (!rc) {
			__clear_bit(NETFS_SREQ_ONDEMAND, &subreq->flags);
			goto retry;
		}
		ret = NETFS_INVALID_READ;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
out_no_object:
	trace_cachefiles_prep_read(subreq, ret, why, ino);
	return ret;
}
0503
0504
0505
0506
/*
 * Prepare for a write to occur.  Expands *_start/*_len to DIO (page)
 * alignment, checks whether there's sufficient disk space for the write and,
 * if space is short, punches out a partially-allocated block so it can be
 * rewritten whole.
 */
int __cachefiles_prepare_write(struct cachefiles_object *object,
			       struct file *file,
			       loff_t *_start, size_t *_len,
			       bool no_space_allocated_yet)
{
	struct cachefiles_cache *cache = object->volume->cache;
	loff_t start = *_start, pos;
	size_t len = *_len, down;
	int ret;

	/* Round to DIO size */
	down = start - round_down(start, PAGE_SIZE);
	*_start = start - down;
	*_len = round_up(down + len, PAGE_SIZE);

	/* We need to work out whether there's sufficient disk space to
	 * perform the write - but we can skip the seek probe if we know no
	 * space has been allocated in the region yet.
	 */
	if (no_space_allocated_yet)
		goto check_space;

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_DATA);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		if (pos == -ENXIO)
			goto check_space; /* Unallocated tail */
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		goto check_space; /* Unallocated region */

	/* We have a block that's at least partially filled - if we're low on
	 * space, we need to see if it's fully allocated.  If it's not, we may
	 * want to cull it.
	 */
	if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				 cachefiles_has_space_check) == 0)
		return 0; /* Enough space to simply overwrite the whole block */

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_HOLE);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		return 0; /* Fully allocated */

	/* Partially allocated, but insufficient space: cull by punching out
	 * the block so that the write can reallocate it in full.
	 */
	fscache_count_no_write_space();
	ret = cachefiles_inject_remove_error();
	if (ret == 0)
		ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				    *_start, *_len);
	if (ret < 0) {
		trace_cachefiles_io_error(object, file_inode(file), ret,
					  cachefiles_trace_fallocate_error);
		cachefiles_io_error_obj(object,
					"CacheFiles: fallocate failed (%d)\n", ret);
		ret = -EIO;
	}

	return ret;

check_space:
	return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				    cachefiles_has_space_for_write);
}
0581
0582 static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
0583 loff_t *_start, size_t *_len, loff_t i_size,
0584 bool no_space_allocated_yet)
0585 {
0586 struct cachefiles_object *object = cachefiles_cres_object(cres);
0587 struct cachefiles_cache *cache = object->volume->cache;
0588 const struct cred *saved_cred;
0589 int ret;
0590
0591 if (!cachefiles_cres_file(cres)) {
0592 if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
0593 return -ENOBUFS;
0594 if (!cachefiles_cres_file(cres))
0595 return -ENOBUFS;
0596 }
0597
0598 cachefiles_begin_secure(cache, &saved_cred);
0599 ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
0600 _start, _len,
0601 no_space_allocated_yet);
0602 cachefiles_end_secure(cache, saved_cred);
0603 return ret;
0604 }
0605
0606
0607
0608
0609 static void cachefiles_end_operation(struct netfs_cache_resources *cres)
0610 {
0611 struct file *file = cachefiles_cres_file(cres);
0612
0613 if (file)
0614 fput(file);
0615 fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
0616 }
0617
/*
 * Cache operations table handed to the netfs library via cres->ops in
 * cachefiles_begin_operation().
 */
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
	.end_operation		= cachefiles_end_operation,
	.read			= cachefiles_read,
	.write			= cachefiles_write,
	.prepare_read		= cachefiles_prepare_read,
	.prepare_write		= cachefiles_prepare_write,
	.query_occupancy	= cachefiles_query_occupancy,
};
0626
0627
0628
0629
/*
 * Attach the cache object's backing file to the cache resources at the start
 * of an operation.  The ref on the file is taken under the object lock to
 * avoid racing with the file being replaced or removed.  Returns false if a
 * file is required (i.e. want_state isn't just FSCACHE_WANT_PARAMS) but none
 * is available.
 */
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);

	if (!cachefiles_cres_file(cres)) {
		cres->ops = &cachefiles_netfs_cache_ops;
		if (object->file) {
			spin_lock(&object->lock);
			/* Re-check under the lock in case the file went away
			 * or another thread already attached one.
			 */
			if (!cres->cache_priv2 && object->file)
				cres->cache_priv2 = get_file(object->file);
			spin_unlock(&object->lock);
		}
	}

	if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
		pr_err("failed to get cres->file\n");
		return false;
	}

	return true;
}