// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 *
 *  Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"

/**
 * v9fs_issue_read - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
{
    struct netfs_io_request *rreq = subreq->rreq;
    struct p9_fid *fid = rreq->netfs_priv;
    struct iov_iter to;
    loff_t pos = subreq->start + subreq->transferred;
    size_t len = subreq->len - subreq->transferred;
    int total, err;

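    /* Build an iterator over the inode's page-cache xarray so the 9P
     * reply data is copied straight into the cached pages. */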
    iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

    total = p9_client_read(fid, pos, &to, &err);

    /* If we just extended the file size, any portion not in
     * cache won't be on the server and is zeroed. */
    __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

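    /* Report completion: hand back the error if one occurred,
     * otherwise the number of bytes transferred. */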
    netfs_subreq_terminated(subreq, err ?: total, false);
}

/**
 * v9fs_init_request - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
    struct inode *inode = file_inode(file);
    struct v9fs_inode *v9inode = V9FS_I(inode);
    struct p9_fid *fid = file->private_data;

    BUG_ON(!fid);

    /* We might need to read from a fid that was opened write-only
     * for a read-modify-write of the page cache; use the writeback
     * fid for that. */
    if (rreq->origin == NETFS_READ_FOR_WRITE &&
        (fid->mode & O_ACCMODE) == O_WRONLY) {
        fid = v9inode->writeback_fid;
        BUG_ON(!fid);
    }

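    /* Take a reference on the fid for the lifetime of the request;
     * it is dropped again in v9fs_free_request(). */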
    p9_fid_get(fid);
    rreq->netfs_priv = fid;
    return 0;
}

/**
 * v9fs_free_request - Clean up a request initialised by v9fs_init_request
 * @rreq: The I/O request to clean up
 */
static void v9fs_free_request(struct netfs_io_request *rreq)
{
    struct p9_fid *fid = rreq->netfs_priv;

    p9_fid_put(fid);
}

/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_9P_FSCACHE
    struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

    return fscache_begin_read_operation(&rreq->cache_resources, cookie);
#else
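    /* Without CONFIG_9P_FSCACHE there is no cache to read from;
     * -ENOBUFS tells netfslib that caching is unavailable. */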
    return -ENOBUFS;
#endif
}

const struct netfs_request_ops v9fs_req_ops = {
    .init_request           = v9fs_init_request,
    .free_request           = v9fs_free_request,
    .begin_cache_operation  = v9fs_begin_cache_operation,
    .issue_read             = v9fs_issue_read,
};

/**
 * v9fs_release_folio - Release the private state associated with a folio
 * @folio: The folio to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns true if the folio can be released, false otherwise.
 */
static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
{
    struct inode *inode = folio_inode(folio);

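    /* A folio that still has private data attached cannot be released. */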
    if (folio_test_private(folio))
        return false;
#ifdef CONFIG_9P_FSCACHE
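    /* If the folio is being written to the cache, we can only wait for
     * that write if the caller allows FS recursion and we are not
     * kswapd; otherwise refuse the release. */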
    if (folio_test_fscache(folio)) {
        if (current_is_kswapd() || !(gfp & __GFP_FS))
            return false;
        folio_wait_fscache(folio);
    }
#endif
    fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
    return true;
}

static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
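    /* Wait for any in-flight write of this folio to the cache to finish
     * before its contents are invalidated. */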
    folio_wait_fscache(folio);
}

static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                                     bool was_async)
{
    struct v9fs_inode *v9inode = priv;
    __le32 version;

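    /* If the write to the cache failed for any reason other than the
     * cache being unavailable, invalidate the cached copy so stale
     * data cannot be read back later. */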
    if (IS_ERR_VALUE(transferred_or_error) &&
        transferred_or_error != -ENOBUFS) {
        version = cpu_to_le32(v9inode->qid.version);
        fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
                           i_size_read(&v9inode->netfs.inode), 0);
    }
}

static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
    struct inode *inode = folio_inode(folio);
    struct v9fs_inode *v9inode = V9FS_I(inode);
    struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
    loff_t start = folio_pos(folio);
    loff_t i_size = i_size_read(inode);
    struct iov_iter from;
    size_t len = folio_size(folio);
    int err;

    if (start >= i_size)
        return 0; /* Simultaneous truncation occurred */

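    /* Trim the write so it does not run past the current EOF. */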
    len = min_t(loff_t, i_size - start, len);

    iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

    /* The writeback fid should always be set. */
    BUG_ON(!v9inode->writeback_fid);

    folio_wait_fscache(folio);
    folio_start_writeback(folio);

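    /* Write the folio contents to the server via the writeback fid,
     * then mirror them into the local cache if one is active. */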
    p9_client_write(v9inode->writeback_fid, start, &from, &err);

    if (err == 0 &&
        fscache_cookie_enabled(cookie) &&
        test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
        folio_start_fscache(folio);
        fscache_write_to_cache(v9fs_inode_cookie(v9inode),
                               folio_mapping(folio), start, len, i_size,
                               v9fs_write_to_cache_done, v9inode,
                               true);
    }

    folio_end_writeback(folio);
    return err;
}

static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
    struct folio *folio = page_folio(page);
    int retval;

    p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

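    /* A transient failure (-EAGAIN) just redirties the folio so it will
     * be retried; any other error is recorded on the mapping. */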
    retval = v9fs_vfs_write_folio_locked(folio);
    if (retval < 0) {
        if (retval == -EAGAIN) {
            folio_redirty_for_writepage(wbc, folio);
            retval = 0;
        } else {
            mapping_set_error(folio_mapping(folio), retval);
        }
    } else {
        retval = 0;
    }

    folio_unlock(folio);
    return retval;
}

static int v9fs_launder_folio(struct folio *folio)
{
    int retval;

    if (folio_clear_dirty_for_io(folio)) {
        retval = v9fs_vfs_write_folio_locked(folio);
        if (retval)
            return retval;
    }
    folio_wait_fscache(folio);
    return 0;
}

/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write
 * fails with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
    struct file *file = iocb->ki_filp;
    loff_t pos = iocb->ki_pos;
    ssize_t n;
    int err = 0;

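    /* Hand the iterator straight to the 9P client; a write that runs
     * past EOF also updates the inode's block accounting. */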
    if (iov_iter_rw(iter) == WRITE) {
        n = p9_client_write(file->private_data, pos, iter, &err);
        if (n) {
            struct inode *inode = file_inode(file);
            loff_t i_size = i_size_read(inode);

            if (pos + n > i_size)
                inode_add_bytes(inode, pos + n - i_size);
        }
    } else {
        n = p9_client_read(file->private_data, pos, iter, &err);
    }
    return n ? n : err;
}

static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
                            loff_t pos, unsigned int len,
                            struct page **subpagep, void **fsdata)
{
    int retval;
    struct folio *folio;
    struct v9fs_inode *v9inode = V9FS_I(mapping->host);

    p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

    BUG_ON(!v9inode->writeback_fid);

    /* Prefetch the area to be written into the cache if we're caching
     * this file.  We need to do this before we get a lock on the page
     * in case there's more than one writer competing for the same
     * cache block.
     */
    retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len,
                               &folio, fsdata);
    if (retval < 0)
        return retval;

    *subpagep = &folio->page;
    return retval;
}

static int v9fs_write_end(struct file *filp, struct address_space *mapping,
                          loff_t pos, unsigned int len, unsigned int copied,
                          struct page *subpage, void *fsdata)
{
    loff_t last_pos = pos + copied;
    struct folio *folio = page_folio(subpage);
    struct inode *inode = mapping->host;
    struct v9fs_inode *v9inode = V9FS_I(inode);

    p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

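    /* If the folio was not up to date and the copy fell short, it now
     * holds a partially-written range; discard the update and let the
     * caller retry. */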
    if (!folio_test_uptodate(folio)) {
        if (unlikely(copied < len)) {
            copied = 0;
            goto out;
        }

        folio_mark_uptodate(folio);
    }

    /*
     * No need to use i_size_read() here: the i_size
     * cannot change under us because we hold the i_mutex.
     */
    if (last_pos > inode->i_size) {
        inode_add_bytes(inode, last_pos - inode->i_size);
        i_size_write(inode, last_pos);
        fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
    }
    folio_mark_dirty(folio);
out:
    folio_unlock(folio);
    folio_put(folio);

    return copied;
}

#ifdef CONFIG_9P_FSCACHE
/*
 * Mark a folio as having been made dirty and thus needing writeback.  We
 * also need to pin the cache object to write back to.
 */
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
    struct v9fs_inode *v9inode = V9FS_I(mapping->host);

    return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_dirty_folio filemap_dirty_folio
#endif

const struct address_space_operations v9fs_addr_operations = {
    .read_folio = netfs_read_folio,
    .readahead = netfs_readahead,
    .dirty_folio = v9fs_dirty_folio,
    .writepage = v9fs_vfs_writepage,
    .write_begin = v9fs_write_begin,
    .write_end = v9fs_write_end,
    .release_folio = v9fs_release_folio,
    .invalidate_folio = v9fs_invalidate_folio,
    .launder_folio = v9fs_launder_folio,
    .direct_IO = v9fs_direct_IO,
};