// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"

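/*
 * Allocate a bare-bones netfs_io_request covering [start, start + len) of
 * @mapping.  Note that this open-codes a minimal request rather than going
 * through the netfs library; only the fields used in this file are set.
 */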
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
                         loff_t start, size_t len)
{
    struct netfs_io_request *rreq;

    rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
    if (!rreq)
        return ERR_PTR(-ENOMEM);

    rreq->start   = start;
    rreq->len     = len;
    rreq->mapping = mapping;
    rreq->inode   = mapping->host;
    INIT_LIST_HEAD(&rreq->subrequests);
    refcount_set(&rreq->ref, 1);
    return rreq;
}

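/*
 * Drop a reference on @rreq.  On the final put, end the cache operation
 * (if one was begun) and free the request.
 */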
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
    if (!refcount_dec_and_test(&rreq->ref))
        return;
    if (rreq->cache_resources.ops)
        rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
    kfree(rreq);
}

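/*
 * Drop a reference on @subreq.  The final put also releases the reference
 * the subrequest holds on its parent request.
 */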
static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
    if (!refcount_dec_and_test(&subreq->ref))
        return;
    erofs_fscache_put_request(subreq->rreq);
    kfree(subreq);
}

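/* Detach and put every subrequest still linked to @rreq. */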
static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
    struct netfs_io_subrequest *subreq;

    while (!list_empty(&rreq->subrequests)) {
        subreq = list_first_entry(&rreq->subrequests,
                struct netfs_io_subrequest, rreq_link);
        list_del(&subreq->rreq_link);
        erofs_fscache_put_subrequest(subreq);
    }
}

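/*
 * Walk the folios covered by @rreq, mark each one uptodate unless an
 * overlapping subrequest reported an error, and unlock them all.  This
 * appears to be a trimmed-down counterpart of the netfs library's folio
 * unlock pass: iopos tracks the I/O position across the subrequest list
 * while the xarray walk advances folio by folio.
 */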
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
    struct netfs_io_subrequest *subreq;
    struct folio *folio;
    unsigned int iopos = 0;
    pgoff_t start_page = rreq->start / PAGE_SIZE;
    pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
    bool subreq_failed = false;

    XA_STATE(xas, &rreq->mapping->i_pages, start_page);

    subreq = list_first_entry(&rreq->subrequests,
                  struct netfs_io_subrequest, rreq_link);
    subreq_failed = (subreq->error < 0);

    rcu_read_lock();
    xas_for_each(&xas, folio, last_page) {
        unsigned int pgpos =
            (folio_index(folio) - start_page) * PAGE_SIZE;
        unsigned int pgend = pgpos + folio_size(folio);
        bool pg_failed = false;

        for (;;) {
            if (!subreq) {
                pg_failed = true;
                break;
            }

            pg_failed |= subreq_failed;
            if (pgend < iopos + subreq->len)
                break;

            iopos += subreq->len;
            if (!list_is_last(&subreq->rreq_link,
                      &rreq->subrequests)) {
                subreq = list_next_entry(subreq, rreq_link);
                subreq_failed = (subreq->error < 0);
            } else {
                subreq = NULL;
                subreq_failed = false;
            }
            if (pgend == iopos)
                break;
        }

        if (!pg_failed)
            folio_mark_uptodate(folio);

        folio_unlock(folio);
    }
    rcu_read_unlock();
}

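/* Finalize @rreq once the last outstanding subrequest has completed. */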
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
    erofs_fscache_rreq_unlock_folios(rreq);
    erofs_fscache_clear_subrequests(rreq);
    erofs_fscache_put_request(rreq);
}

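/*
 * Completion callback handed to fscache_read().  Records a subrequest
 * error, completes the parent request if this was the last outstanding
 * subrequest, and drops the I/O reference on the subrequest.
 */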
static void erofs_fscache_subreq_complete(void *priv,
        ssize_t transferred_or_error, bool was_async)
{
    struct netfs_io_subrequest *subreq = priv;
    struct netfs_io_request *rreq = subreq->rreq;

    if (IS_ERR_VALUE(transferred_or_error))
        subreq->error = transferred_or_error;

    if (atomic_dec_and_test(&rreq->nr_outstanding))
        erofs_fscache_rreq_complete(rreq);

    erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache and fill it into the page cache described by
 * @rreq, whose start and length shall both be aligned with PAGE_SIZE.
 * @pstart describes the start physical address in the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
                struct netfs_io_request *rreq, loff_t pstart)
{
    enum netfs_io_source source;
    struct super_block *sb = rreq->mapping->host->i_sb;
    struct netfs_io_subrequest *subreq;
    struct netfs_cache_resources *cres = &rreq->cache_resources;
    struct iov_iter iter;
    loff_t start = rreq->start;
    size_t len = rreq->len;
    size_t done = 0;
    int ret;

    atomic_set(&rreq->nr_outstanding, 1);

    ret = fscache_begin_read_operation(cres, cookie);
    if (ret)
        goto out;

    while (done < len) {
        subreq = kzalloc(sizeof(struct netfs_io_subrequest),
                 GFP_KERNEL);
        if (!subreq) {
            ret = -ENOMEM;
            goto out;
        }
        /* one ref for the list below, one for the completion callback */
        INIT_LIST_HEAD(&subreq->rreq_link);
        refcount_set(&subreq->ref, 2);
        subreq->rreq = rreq;
        refcount_inc(&rreq->ref);

        subreq->start = pstart + done;
        subreq->len = len - done;
        subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

        list_add_tail(&subreq->rreq_link, &rreq->subrequests);

        source = cres->ops->prepare_read(subreq, LLONG_MAX);
        if (WARN_ON(subreq->len == 0))
            source = NETFS_INVALID_READ;
        if (source != NETFS_READ_FROM_CACHE) {
            erofs_err(sb, "failed to fscache prepare_read (source %d)",
                  source);
            ret = -EIO;
            subreq->error = ret;
            erofs_fscache_put_subrequest(subreq);
            goto out;
        }

        atomic_inc(&rreq->nr_outstanding);

        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
                start + done, subreq->len);

        ret = fscache_read(cres, subreq->start, &iter,
                   NETFS_READ_HOLE_FAIL,
                   erofs_fscache_subreq_complete, subreq);
        if (ret == -EIOCBQUEUED)
            ret = 0;
        if (ret) {
            erofs_err(sb, "failed to fscache_read (ret %d)", ret);
            goto out;
        }

        done += subreq->len;
    }
out:
    /* drop the initial reference taken above */
    if (atomic_dec_and_test(&rreq->nr_outstanding))
        erofs_fscache_rreq_complete(rreq);

    return ret;
}

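/*
 * read_folio for the pseudo inode backing metadata: the folio position is
 * used directly as the physical address within the blob (device 0).
 */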
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
    int ret;
    struct super_block *sb = folio_mapping(folio)->host->i_sb;
    struct netfs_io_request *rreq;
    struct erofs_map_dev mdev = {
        .m_deviceid = 0,
        .m_pa = folio_pos(folio),
    };

    ret = erofs_map_dev(sb, &mdev);
    if (ret)
        goto out;

    rreq = erofs_fscache_alloc_request(folio_mapping(folio),
                folio_pos(folio), folio_size(folio));
    if (IS_ERR(rreq)) {
        ret = PTR_ERR(rreq);
        goto out;
    }

    return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
                rreq, mdev.m_pa);
out:
    folio_unlock(folio);
    return ret;
}

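/*
 * Serve inline (tail-packed) data synchronously: copy it out of the
 * metadata buffer into the folio and zero the tail of the page.
 */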
static int erofs_fscache_read_folio_inline(struct folio *folio,
                     struct erofs_map_blocks *map)
{
    struct super_block *sb = folio_mapping(folio)->host->i_sb;
    struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
    erofs_blk_t blknr;
    size_t offset, len;
    void *src, *dst;

    /* For the tail-packing layout, the offset may be non-zero. */
    offset = erofs_blkoff(map->m_pa);
    blknr = erofs_blknr(map->m_pa);
    len = map->m_llen;

    src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
    if (IS_ERR(src))
        return PTR_ERR(src);

    dst = kmap_local_folio(folio, 0);
    memcpy(dst, src + offset, len);
    memset(dst + len, 0, PAGE_SIZE - len);
    kunmap_local(dst);

    erofs_put_metabuf(&buf);
    return 0;
}

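/*
 * read_folio for regular file data: map the logical offset to a device
 * (blob) address and read it from fscache.  Holes are zeroed in place and
 * inline data is copied synchronously.
 */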
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
    struct inode *inode = folio_mapping(folio)->host;
    struct super_block *sb = inode->i_sb;
    struct erofs_map_blocks map;
    struct erofs_map_dev mdev;
    struct netfs_io_request *rreq;
    erofs_off_t pos;
    loff_t pstart;
    int ret;

    DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

    pos = folio_pos(folio);
    map.m_la = pos;

    ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
    if (ret)
        goto out_unlock;

    if (!(map.m_flags & EROFS_MAP_MAPPED)) {
        folio_zero_range(folio, 0, folio_size(folio));
        goto out_uptodate;
    }

    if (map.m_flags & EROFS_MAP_META) {
        ret = erofs_fscache_read_folio_inline(folio, &map);
        goto out_uptodate;
    }

    mdev = (struct erofs_map_dev) {
        .m_deviceid = map.m_deviceid,
        .m_pa = map.m_pa,
    };

    ret = erofs_map_dev(sb, &mdev);
    if (ret)
        goto out_unlock;

    rreq = erofs_fscache_alloc_request(folio_mapping(folio),
                folio_pos(folio), folio_size(folio));
    if (IS_ERR(rreq)) {
        ret = PTR_ERR(rreq);
        goto out_unlock;
    }

    pstart = mdev.m_pa + (pos - map.m_la);
    return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
                rreq, pstart);

out_uptodate:
    if (!ret)
        folio_mark_uptodate(folio);
out_unlock:
    folio_unlock(folio);
    return ret;
}

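/*
 * Consume @len bytes worth of folios from the readahead window, dropping
 * the readahead reference on each.  If @unlock is set, the folios are
 * marked uptodate and unlocked here; otherwise they remain locked until
 * the asynchronous read completes.
 */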
static void erofs_fscache_advance_folios(struct readahead_control *rac,
                     size_t len, bool unlock)
{
    while (len) {
        struct folio *folio = readahead_folio(rac);

        len -= folio_size(folio);
        if (unlock) {
            folio_mark_uptodate(folio);
            folio_unlock(folio);
        }
    }
}

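/*
 * Readahead in extent-sized chunks: each iteration maps one extent and
 * either zeroes it (hole), copies it (inline) or kicks off an asynchronous
 * fscache read, until the window is consumed or an error occurs.
 */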
static void erofs_fscache_readahead(struct readahead_control *rac)
{
    struct inode *inode = rac->mapping->host;
    struct super_block *sb = inode->i_sb;
    size_t len, count, done = 0;
    erofs_off_t pos;
    loff_t start, offset;
    int ret;

    if (!readahead_count(rac))
        return;

    start = readahead_pos(rac);
    len = readahead_length(rac);

    do {
        struct erofs_map_blocks map;
        struct erofs_map_dev mdev;
        struct netfs_io_request *rreq;

        pos = start + done;
        map.m_la = pos;

        ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
        if (ret)
            return;

        offset = start + done;
        count = min_t(size_t, map.m_llen - (pos - map.m_la),
                  len - done);

        if (!(map.m_flags & EROFS_MAP_MAPPED)) {
            struct iov_iter iter;

            iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
                    offset, count);
            iov_iter_zero(count, &iter);

            erofs_fscache_advance_folios(rac, count, true);
            ret = count;
            continue;
        }

        if (map.m_flags & EROFS_MAP_META) {
            struct folio *folio = readahead_folio(rac);

            ret = erofs_fscache_read_folio_inline(folio, &map);
            if (!ret) {
                folio_mark_uptodate(folio);
                ret = folio_size(folio);
            }

            folio_unlock(folio);
            continue;
        }

        mdev = (struct erofs_map_dev) {
            .m_deviceid = map.m_deviceid,
            .m_pa = map.m_pa,
        };
        ret = erofs_map_dev(sb, &mdev);
        if (ret)
            return;

        rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
        if (IS_ERR(rreq))
            return;
        /*
         * Drop the folio refs here; the folios are unlocked in
         * rreq_unlock_folios() once the request completes.
         */
        erofs_fscache_advance_folios(rac, count, false);
        ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
                    rreq, mdev.m_pa + (pos - map.m_la));
        if (!ret)
            ret = count;
    } while (ret > 0 && ((done += ret) < len));
}

static const struct address_space_operations erofs_fscache_meta_aops = {
    .read_folio = erofs_fscache_meta_read_folio,
};

const struct address_space_operations erofs_fscache_access_aops = {
    .read_folio = erofs_fscache_read_folio,
    .readahead = erofs_fscache_readahead,
};

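/*
 * Register an fscache cookie for one backing blob of this filesystem.
 * When @need_inode is set, an anonymous inode is also allocated so the
 * blob can be read through its own address space via the meta aops.
 *
 * A hypothetical caller might look like (illustration only, not taken
 * from this file):
 *
 *    err = erofs_fscache_register_cookie(sb, &fscache, name, true);
 *    if (err)
 *        return err;
 */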
int erofs_fscache_register_cookie(struct super_block *sb,
                  struct erofs_fscache **fscache,
                  char *name, bool need_inode)
{
    struct fscache_volume *volume = EROFS_SB(sb)->volume;
    struct erofs_fscache *ctx;
    struct fscache_cookie *cookie;
    int ret;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
                    name, strlen(name), NULL, 0, 0);
    if (!cookie) {
        erofs_err(sb, "failed to get cookie for %s", name);
        ret = -EINVAL;
        goto err;
    }

    fscache_use_cookie(cookie, false);
    ctx->cookie = cookie;

    if (need_inode) {
        struct inode *const inode = new_inode(sb);

        if (!inode) {
            erofs_err(sb, "failed to get anon inode for %s", name);
            ret = -ENOMEM;
            goto err_cookie;
        }

        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

        ctx->inode = inode;
    }

    *fscache = ctx;
    return 0;

err_cookie:
    fscache_unuse_cookie(ctx->cookie, NULL, NULL);
    fscache_relinquish_cookie(ctx->cookie, false);
    ctx->cookie = NULL;
err:
    kfree(ctx);
    return ret;
}

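/*
 * Tear down a cookie registered by erofs_fscache_register_cookie(),
 * releasing the cookie, the optional anonymous inode and the context.
 */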
void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
    struct erofs_fscache *ctx = *fscache;

    if (!ctx)
        return;

    fscache_unuse_cookie(ctx->cookie, NULL, NULL);
    fscache_relinquish_cookie(ctx->cookie, false);
    ctx->cookie = NULL;

    iput(ctx->inode);
    ctx->inode = NULL;

    kfree(ctx);
    *fscache = NULL;
}

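/*
 * Acquire the fscache volume for this filesystem, named "erofs,<fsid>".
 * A NULL return from fscache_acquire_volume() is mapped to -EOPNOTSUPP.
 */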
int erofs_fscache_register_fs(struct super_block *sb)
{
    struct erofs_sb_info *sbi = EROFS_SB(sb);
    struct fscache_volume *volume;
    char *name;
    int ret = 0;

    name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
    if (!name)
        return -ENOMEM;

    volume = fscache_acquire_volume(name, NULL, NULL, 0);
    if (IS_ERR_OR_NULL(volume)) {
        erofs_err(sb, "failed to register volume for %s", name);
        ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
        volume = NULL;
    }

    sbi->volume = volume;
    kfree(name);
    return ret;
}

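/* Release the volume acquired in erofs_fscache_register_fs(). */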
void erofs_fscache_unregister_fs(struct super_block *sb)
{
    struct erofs_sb_info *sbi = EROFS_SB(sb);

    fscache_relinquish_volume(sbi->volume, NULL, false);
    sbi->volume = NULL;
}