// SPDX-License-Identifier: GPL-2.0-or-later
/* Cache data I/O routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/fscache-cache.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/slab.h>
#include "internal.h"

/**
 * fscache_wait_for_operation - Wait for an object to become accessible
 * @cres: The cache resources for the operation being performed
 * @want_state: The minimum state the object must be at
 *
 * See if the target cache object is at the specified minimum state of
 * accessibility yet, and if not, wait for it.
 */
bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
                enum fscache_want_state want_state)
{
    struct fscache_cookie *cookie = fscache_cres_cookie(cres);
    enum fscache_cookie_state state;

again:
    if (!fscache_cache_is_live(cookie->volume->cache)) {
        _leave(" [broken]");
        return false;
    }

    state = fscache_cookie_state(cookie);
    _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

    switch (state) {
    case FSCACHE_COOKIE_STATE_CREATING:
    case FSCACHE_COOKIE_STATE_INVALIDATING:
        if (want_state == FSCACHE_WANT_PARAMS)
            goto ready; /* There can be no content */
        fallthrough;
    case FSCACHE_COOKIE_STATE_LOOKING_UP:
    case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
        wait_var_event(&cookie->state,
                   fscache_cookie_state(cookie) != state);
        goto again;

    case FSCACHE_COOKIE_STATE_ACTIVE:
        goto ready;
    case FSCACHE_COOKIE_STATE_DROPPED:
    case FSCACHE_COOKIE_STATE_RELINQUISHING:
    default:
        _leave(" [not live]");
        return false;
    }

ready:
    if (!cres->cache_priv2)
        return cookie->volume->cache->ops->begin_operation(cres, want_state);
    return true;
}
EXPORT_SYMBOL(fscache_wait_for_operation);
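
/*
 * Illustrative sketch, not part of the original file: a helper a network
 * filesystem might use to check that the cache object backing an already
 * prepared set of cache resources is usable for data I/O, not just for
 * parameter queries.  The example_* name is hypothetical.
 */
static bool example_cache_ready_for_data(struct netfs_cache_resources *cres)
{
    /* FSCACHE_WANT_PARAMS would only need the object's parameters; reading
     * data requires FSCACHE_WANT_READ readiness, so this may sleep until
     * lookup, creation or invalidation has finished.
     */
    return fscache_wait_for_operation(cres, FSCACHE_WANT_READ);
}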

/*
 * Begin an I/O operation on the cache, waiting till we reach the right state.
 *
 * Attaches the resources required to the operation resources record.
 */
static int fscache_begin_operation(struct netfs_cache_resources *cres,
                   struct fscache_cookie *cookie,
                   enum fscache_want_state want_state,
                   enum fscache_access_trace why)
{
    enum fscache_cookie_state state;
    long timeo;
    bool once_only = false;

    cres->ops           = NULL;
    cres->cache_priv    = cookie;
    cres->cache_priv2   = NULL;
    cres->debug_id      = cookie->debug_id;
    cres->inval_counter = cookie->inval_counter;

    if (!fscache_begin_cookie_access(cookie, why))
        return -ENOBUFS;

again:
    spin_lock(&cookie->lock);

    state = fscache_cookie_state(cookie);
    _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

    switch (state) {
    case FSCACHE_COOKIE_STATE_LOOKING_UP:
    case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
    case FSCACHE_COOKIE_STATE_INVALIDATING:
        goto wait_for_file_wrangling;
    case FSCACHE_COOKIE_STATE_CREATING:
        if (want_state == FSCACHE_WANT_PARAMS)
            goto ready; /* There can be no content */
        goto wait_for_file_wrangling;
    case FSCACHE_COOKIE_STATE_ACTIVE:
        goto ready;
    case FSCACHE_COOKIE_STATE_DROPPED:
    case FSCACHE_COOKIE_STATE_RELINQUISHING:
        WARN(1, "Can't use cookie in state %u\n", cookie->state);
        goto not_live;
    default:
        goto not_live;
    }

ready:
    spin_unlock(&cookie->lock);
    if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
        goto failed;
    return 0;

wait_for_file_wrangling:
    spin_unlock(&cookie->lock);
    trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
                 atomic_read(&cookie->n_accesses),
                 fscache_access_io_wait);
    timeo = wait_var_event_timeout(&cookie->state,
                       fscache_cookie_state(cookie) != state, 20 * HZ);
    if (timeo <= 1 && !once_only) {
        pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u\n",
            __func__, fscache_cookie_state(cookie), state);
        fscache_print_cookie(cookie, 'O');
        once_only = true;
    }
    goto again;

not_live:
    spin_unlock(&cookie->lock);
failed:
    cres->cache_priv = NULL;
    cres->ops = NULL;
    fscache_end_cookie_access(cookie, fscache_access_io_not_live);
    _leave(" = -ENOBUFS");
    return -ENOBUFS;
}

int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
                   struct fscache_cookie *cookie)
{
    return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
                       fscache_access_io_read);
}
EXPORT_SYMBOL(__fscache_begin_read_operation);

int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
                    struct fscache_cookie *cookie)
{
    return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
                       fscache_access_io_write);
}
EXPORT_SYMBOL(__fscache_begin_write_operation);
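
/*
 * Illustrative sketch, not part of the original file: reading data back from
 * the cache.  A netfs would begin a read operation on its cookie, issue the
 * read against the attached cache resources and end the operation when done.
 * The example_* names are hypothetical, and the sketch assumes the cache
 * backend (e.g. cachefiles) invokes the termination handler on all paths.
 */
struct example_cache_read {
    struct completion done;     /* signalled by the termination handler */
    ssize_t result;             /* byte count or error from the backend */
};

static void example_cache_read_done(void *priv, ssize_t transferred_or_error,
                    bool was_async)
{
    struct example_cache_read *req = priv;

    req->result = transferred_or_error;
    complete(&req->done);
}

static ssize_t example_read_from_cache(struct fscache_cookie *cookie,
                       loff_t pos, struct iov_iter *iter)
{
    struct example_cache_read req;
    struct netfs_cache_resources cres = {};
    int ret;

    init_completion(&req.done);

    /* Pin the cookie and attach the cache backend's I/O operations. */
    ret = fscache_begin_read_operation(&cres, cookie);
    if (ret < 0)
        return ret;

    /* Ask the cache to fill the iterator; a hole is treated as an error
     * (-ENODATA) rather than being zero-filled.
     */
    fscache_read(&cres, pos, iter, NETFS_READ_HOLE_FAIL,
             example_cache_read_done, &req);
    wait_for_completion(&req.done);

    fscache_end_operation(&cres);
    return req.result;
}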

/**
 * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
 * @mapping: The mapping the folio belongs to.
 * @folio: The folio being dirtied.
 * @cookie: The cookie referring to the cache object
 *
 * Set the dirty flag on a folio and pin an in-use cache object in memory
 * so that writeback can later write to it.  This is intended
 * to be called from the filesystem's ->dirty_folio() method.
 *
 * Return: true if the dirty flag was set on the folio, false otherwise.
 */
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
                struct fscache_cookie *cookie)
{
    struct inode *inode = mapping->host;
    bool need_use = false;

    _enter("");

    if (!filemap_dirty_folio(mapping, folio))
        return false;
    if (!fscache_cookie_valid(cookie))
        return true;

    if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
            inode->i_state |= I_PINNING_FSCACHE_WB;
            need_use = true;
        }
        spin_unlock(&inode->i_lock);

        if (need_use)
            fscache_use_cookie(cookie, true);
    }
    return true;
}
EXPORT_SYMBOL(fscache_dirty_folio);
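
/*
 * Illustrative sketch, not part of the original file: how a filesystem might
 * wire fscache_dirty_folio() into its address_space_operations.  The
 * example_* names are hypothetical; a real netfs would return the cookie it
 * stored in its inode rather than NULL.
 */
static struct fscache_cookie *example_inode_cookie(struct inode *inode)
{
    return NULL;    /* placeholder for the netfs's per-inode cookie */
}

static bool example_dirty_folio(struct address_space *mapping,
                struct folio *folio)
{
    /* Marks the folio dirty and, if a cookie is attached, pins the cache
     * object so writeback can copy the data into the cache later.
     */
    return fscache_dirty_folio(mapping, folio,
                   example_inode_cookie(mapping->host));
}

/* ...and in the filesystem's aops table:
 *      .dirty_folio = example_dirty_folio,
 */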

struct fscache_write_request {
    struct netfs_cache_resources cache_resources;
    struct address_space    *mapping;
    loff_t                  start;
    size_t                  len;
    bool                    set_bits;
    netfs_io_terminated_t   term_func;
    void                    *term_func_priv;
};

void __fscache_clear_page_bits(struct address_space *mapping,
                   loff_t start, size_t len)
{
    pgoff_t first = start / PAGE_SIZE;
    pgoff_t last = (start + len - 1) / PAGE_SIZE;
    struct page *page;

    if (len) {
        XA_STATE(xas, &mapping->i_pages, first);

        rcu_read_lock();
        xas_for_each(&xas, page, last) {
            end_page_fscache(page);
        }
        rcu_read_unlock();
    }
}
EXPORT_SYMBOL(__fscache_clear_page_bits);

/*
 * Deal with the completion of writing the data to the cache.
 */
static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
                  bool was_async)
{
    struct fscache_write_request *wreq = priv;

    fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
                wreq->set_bits);

    if (wreq->term_func)
        wreq->term_func(wreq->term_func_priv, transferred_or_error,
                was_async);
    fscache_end_operation(&wreq->cache_resources);
    kfree(wreq);
}

void __fscache_write_to_cache(struct fscache_cookie *cookie,
                  struct address_space *mapping,
                  loff_t start, size_t len, loff_t i_size,
                  netfs_io_terminated_t term_func,
                  void *term_func_priv,
                  bool cond)
{
    struct fscache_write_request *wreq;
    struct netfs_cache_resources *cres;
    struct iov_iter iter;
    int ret = -ENOBUFS;

    if (len == 0)
        goto abandon;

    _enter("%llx,%zx", start, len);

    wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
    if (!wreq)
        goto abandon;
    wreq->mapping           = mapping;
    wreq->start             = start;
    wreq->len               = len;
    wreq->set_bits          = cond;
    wreq->term_func         = term_func;
    wreq->term_func_priv    = term_func_priv;

    cres = &wreq->cache_resources;
    if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
                    fscache_access_io_write) < 0)
        goto abandon_free;

    ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
    if (ret < 0)
        goto abandon_end;

    /* TODO: Consider clearing page bits now for space the write isn't
     * covering.  This is more complicated than it appears when THPs are
     * taken into account.
     */

    iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
    fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
    return;

abandon_end:
    return fscache_wreq_done(wreq, ret, false);
abandon_free:
    kfree(wreq);
abandon:
    fscache_clear_page_bits(mapping, start, len, cond);
    if (term_func)
        term_func(term_func_priv, ret, false);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
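
/*
 * Illustrative sketch, not part of the original file: how a netfs might copy
 * a range it has just written back to the server into the cache as well, via
 * the fscache_write_to_cache() wrapper.  The caller is assumed to have set
 * PG_fscache on the folios (folio_start_fscache()) beforehand; passing the
 * final "caching" argument as true makes the completion path clear those
 * marks again through fscache_clear_page_bits().  example_* names are
 * hypothetical.
 */
static void example_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                    bool was_async)
{
    /* priv is term_func_priv; transferred_or_error reports the outcome. */
}

static void example_write_back_to_cache(struct inode *inode,
                    struct fscache_cookie *cookie,
                    loff_t start, size_t len)
{
    fscache_write_to_cache(cookie, inode->i_mapping, start, len,
                   i_size_read(inode),
                   example_write_to_cache_done, inode,
                   true);
}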

/*
 * Change the size of a backing object.
 */
void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
    struct netfs_cache_resources cres;

    trace_fscache_resize(cookie, new_size);
    if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
                    fscache_access_io_resize) == 0) {
        fscache_stat(&fscache_n_resizes);
        set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);

        /* We cannot defer a resize as we need to do it inside the
         * netfs's inode lock so that we're serialised with respect to
         * writes.
         */
        cookie->volume->cache->ops->resize_cookie(&cres, new_size);
        fscache_end_operation(&cres);
    } else {
        fscache_stat(&fscache_n_resizes_null);
    }
}
EXPORT_SYMBOL(__fscache_resize_cookie);
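
/*
 * Illustrative sketch, not part of the original file: a netfs truncating a
 * file would resize the cache object through the fscache_resize_cookie()
 * wrapper while holding the inode lock, so that the resize is ordered
 * against concurrent writes as the comment above requires.  The example_*
 * name is hypothetical.
 */
static void example_truncate(struct inode *inode, struct fscache_cookie *cookie,
                 loff_t new_size)
{
    /* Caller holds inode_lock(inode). */
    truncate_setsize(inode, new_size);
    fscache_resize_cookie(cookie, new_size);
}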