0001 // SPDX-License-Identifier: MIT
0002 /*
0003  * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
0004  *
0005  * Copyright (C) 2006-2018 Oracle Corporation
0006  */
0007 
0008 #include <linux/mm.h>
0009 #include <linux/page-flags.h>
0010 #include <linux/pagemap.h>
0011 #include <linux/highmem.h>
0012 #include <linux/sizes.h>
0013 #include "vfsmod.h"
0014 
/* One open host-side handle for a shared-folder file. */
struct vboxsf_handle {
    u64 handle;               /* host (SHFL) file handle */
    u32 root;                 /* shared-folder root id, copied from the sbi */
    u32 access_flags;         /* SHFL_CF_ACCESS_* the handle was opened with */
    struct kref refcount;     /* last put closes the host handle (vboxsf_handle_release) */
    struct list_head head;    /* entry on the owning vboxsf_inode's handle_list */
};
0022 
0023 struct vboxsf_handle *vboxsf_create_sf_handle(struct inode *inode,
0024                           u64 handle, u32 access_flags)
0025 {
0026     struct vboxsf_inode *sf_i = VBOXSF_I(inode);
0027     struct vboxsf_handle *sf_handle;
0028 
0029     sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
0030     if (!sf_handle)
0031         return ERR_PTR(-ENOMEM);
0032 
0033     /* the host may have given us different attr then requested */
0034     sf_i->force_restat = 1;
0035 
0036     /* init our handle struct and add it to the inode's handles list */
0037     sf_handle->handle = handle;
0038     sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
0039     sf_handle->access_flags = access_flags;
0040     kref_init(&sf_handle->refcount);
0041 
0042     mutex_lock(&sf_i->handle_list_mutex);
0043     list_add(&sf_handle->head, &sf_i->handle_list);
0044     mutex_unlock(&sf_i->handle_list_mutex);
0045 
0046     return sf_handle;
0047 }
0048 
/*
 * Open a regular file: translate the Linux open flags into SHFL create
 * parameters, open the file on the host and attach the resulting handle
 * to file->private_data.
 */
static int vboxsf_file_open(struct inode *inode, struct file *file)
{
    struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
    struct shfl_createparms params = {};
    struct vboxsf_handle *sf_handle;
    u32 access_flags = 0;
    int err;

    /*
     * We check the value of params.handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our mode parameter.
     */
    params.handle = SHFL_HANDLE_NIL;
    if (file->f_flags & O_CREAT) {
        params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /*
         * We ignore O_EXCL, as the Linux kernel seems to call create
         * beforehand itself, so O_EXCL should always fail.
         */
        if (file->f_flags & O_TRUNC)
            params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        else
            params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    } else {
        /* Without O_CREAT the file must already exist on the host. */
        params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
            params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
    }

    /* Map the Linux access mode onto the SHFL access flags. */
    switch (file->f_flags & O_ACCMODE) {
    case O_RDONLY:
        access_flags |= SHFL_CF_ACCESS_READ;
        break;

    case O_WRONLY:
        access_flags |= SHFL_CF_ACCESS_WRITE;
        break;

    case O_RDWR:
        access_flags |= SHFL_CF_ACCESS_READWRITE;
        break;

    default:
        /* O_ACCMODE can only take the three values handled above. */
        WARN_ON(1);
    }

    if (file->f_flags & O_APPEND)
        access_flags |= SHFL_CF_ACCESS_APPEND;

    params.create_flags |= access_flags;
    params.info.attr.mode = inode->i_mode;

    err = vboxsf_create_at_dentry(file_dentry(file), &params);
    /*
     * Success with a NIL handle means the host reported an informational
     * failure; translate params.result into an errno (see comment above).
     */
    if (err == 0 && params.handle == SHFL_HANDLE_NIL)
        err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
    if (err)
        return err;

    sf_handle = vboxsf_create_sf_handle(inode, params.handle, access_flags);
    if (IS_ERR(sf_handle)) {
        /* Could not track the handle; close it again on the host. */
        vboxsf_close(sbi->root, params.handle);
        return PTR_ERR(sf_handle);
    }

    file->private_data = sf_handle;
    return 0;
}
0120 
0121 static void vboxsf_handle_release(struct kref *refcount)
0122 {
0123     struct vboxsf_handle *sf_handle =
0124         container_of(refcount, struct vboxsf_handle, refcount);
0125 
0126     vboxsf_close(sf_handle->root, sf_handle->handle);
0127     kfree(sf_handle);
0128 }
0129 
0130 void vboxsf_release_sf_handle(struct inode *inode, struct vboxsf_handle *sf_handle)
0131 {
0132     struct vboxsf_inode *sf_i = VBOXSF_I(inode);
0133 
0134     mutex_lock(&sf_i->handle_list_mutex);
0135     list_del(&sf_handle->head);
0136     mutex_unlock(&sf_i->handle_list_mutex);
0137 
0138     kref_put(&sf_handle->refcount, vboxsf_handle_release);
0139 }
0140 
/* Last close of a struct file: flush our writes and drop the handle. */
static int vboxsf_file_release(struct inode *inode, struct file *file)
{
    /*
     * When a file is closed on our (the guest) side, we want any subsequent
     * accesses done on the host side to see all changes done from our side.
     */
    filemap_write_and_wait(inode->i_mapping);

    /* Unlinks the handle and drops our ref; may close the host handle. */
    vboxsf_release_sf_handle(inode, file->private_data);
    return 0;
}
0152 
/*
 * Write back dirty pages now, because there may not be any suitable
 * open (writable) file handle left to do the writeback with later.
 */
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
    filemap_write_and_wait(vma->vm_file->f_mapping);
}
0161 
/* mmap vm_ops: faults come from the page cache; close flushes dirty pages. */
static const struct vm_operations_struct vboxsf_file_vm_ops = {
    .close      = vboxsf_vma_close,
    .fault      = filemap_fault,
    .map_pages  = filemap_map_pages,
};
0167 
0168 static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
0169 {
0170     int err;
0171 
0172     err = generic_file_mmap(file, vma);
0173     if (!err)
0174         vma->vm_ops = &vboxsf_file_vm_ops;
0175 
0176     return err;
0177 }
0178 
0179 /*
0180  * Note that since we are accessing files on the host's filesystem, files
0181  * may always be changed underneath us by the host!
0182  *
0183  * The vboxsf API between the guest and the host does not offer any functions
0184  * to deal with this. There is no inode-generation to check for changes, no
0185  * events / callback on changes and no way to lock files.
0186  *
0187  * To avoid returning stale data when a file gets *opened* on our (the guest)
0188  * side, we do a "stat" on the host side, then compare the mtime with the
0189  * last known mtime and invalidate the page-cache if they differ.
0190  * This is done from vboxsf_inode_revalidate().
0191  *
0192  * When reads are done through the read_iter fop, it is possible to do
0193  * further cache revalidation then, there are 3 options to deal with this:
0194  *
0195  * 1)  Rely solely on the revalidation done at open time
0196  * 2)  Do another "stat" and compare mtime again. Unfortunately the vboxsf
0197  *     host API does not allow stat on handles, so we would need to use
0198  *     file->f_path.dentry and the stat will then fail if the file was unlinked
0199  *     or renamed (and there is no thing like NFS' silly-rename). So we get:
0200  * 2a) "stat" and compare mtime, on stat failure invalidate the cache
0201  * 2b) "stat" and compare mtime, on stat failure do nothing
0202  * 3)  Simply always call invalidate_inode_pages2_range on the range of the read
0203  *
0204  * Currently we are keeping things KISS and using option 1. this allows
0205  * directly using generic_file_read_iter without wrapping it.
0206  *
0207  * This means that only data written on the host side before open() on
0208  * the guest side is guaranteed to be seen by the guest. If necessary
0209  * we may provide other read-cache strategies in the future and make this
0210  * configurable through a mount option.
0211  */
/* File operations for regular files; read-caching strategy described above. */
const struct file_operations vboxsf_reg_fops = {
    .llseek = generic_file_llseek,
    .read_iter = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
    .mmap = vboxsf_file_mmap,
    .open = vboxsf_file_open,
    .release = vboxsf_file_release,
    .fsync = noop_fsync,    /* write_end is write-through, nothing to sync */
    .splice_read = generic_file_splice_read,
};
0222 
/* Inode operations for regular files. */
const struct inode_operations vboxsf_reg_iops = {
    .getattr = vboxsf_getattr,
    .setattr = vboxsf_setattr
};
0227 
/* Fill one folio (single page here) with data read from the host file. */
static int vboxsf_read_folio(struct file *file, struct folio *folio)
{
    struct page *page = &folio->page;
    struct vboxsf_handle *sf_handle = file->private_data;
    loff_t off = page_offset(page);
    u32 nread = PAGE_SIZE;
    u8 *buf;
    int err;

    buf = kmap(page);

    /* nread is updated by the host to the number of bytes actually read. */
    err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
    if (err == 0) {
        /* Zero the tail past EOF so no stale data reaches userspace. */
        memset(&buf[nread], 0, PAGE_SIZE - nread);
        flush_dcache_page(page);
        SetPageUptodate(page);
    } else {
        SetPageError(page);
    }

    kunmap(page);
    unlock_page(page);
    return err;
}
0252 
0253 static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
0254 {
0255     struct vboxsf_handle *h, *sf_handle = NULL;
0256 
0257     mutex_lock(&sf_i->handle_list_mutex);
0258     list_for_each_entry(h, &sf_i->handle_list, head) {
0259         if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
0260             h->access_flags == SHFL_CF_ACCESS_READWRITE) {
0261             kref_get(&h->refcount);
0262             sf_handle = h;
0263             break;
0264         }
0265     }
0266     mutex_unlock(&sf_i->handle_list_mutex);
0267 
0268     return sf_handle;
0269 }
0270 
/*
 * Write one dirty page back to the host. This needs an open handle with
 * write access; if none is left (writeback can run after close) we return
 * -EBADF and leave the page not-uptodate.
 */
static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct vboxsf_inode *sf_i = VBOXSF_I(inode);
    struct vboxsf_handle *sf_handle;
    loff_t off = page_offset(page);
    loff_t size = i_size_read(inode);
    u32 nwrite = PAGE_SIZE;
    u8 *buf;
    int err;

    /* For the last, partial page only write up to the file size. */
    if (off + PAGE_SIZE > size)
        nwrite = size & ~PAGE_MASK;

    sf_handle = vboxsf_get_write_handle(sf_i);
    if (!sf_handle)
        return -EBADF;

    buf = kmap(page);
    err = vboxsf_write(sf_handle->root, sf_handle->handle,
               off, &nwrite, buf);
    kunmap(page);

    /* Drop the reference taken by vboxsf_get_write_handle(). */
    kref_put(&sf_handle->refcount, vboxsf_handle_release);

    if (err == 0) {
        ClearPageError(page);
        /* mtime changed */
        sf_i->force_restat = 1;
    } else {
        ClearPageUptodate(page);
    }

    unlock_page(page);
    return err;
}
0307 
/*
 * Complete a buffered write by pushing the copied bytes straight through
 * to the host (write-through). Returns the number of bytes written, or 0
 * when the host write failed.
 */
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned int len, unsigned int copied,
                struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct vboxsf_handle *sf_handle = file->private_data;
    unsigned int from = pos & ~PAGE_MASK;
    u32 nwritten = len;
    u8 *buf;
    int err;

    /* zero the stale part of the page if we did a short copy */
    if (!PageUptodate(page) && copied < len)
        zero_user(page, from + copied, len - copied);

    buf = kmap(page);
    err = vboxsf_write(sf_handle->root, sf_handle->handle,
               pos, &nwritten, buf + from);
    kunmap(page);

    if (err) {
        nwritten = 0;
        goto out;
    }

    /* mtime changed */
    VBOXSF_I(inode)->force_restat = 1;

    /* Only a whole-page write makes a !uptodate page fully valid. */
    if (!PageUptodate(page) && nwritten == PAGE_SIZE)
        SetPageUptodate(page);

    /* Extend the cached i_size if the write went past it. */
    pos += nwritten;
    if (pos > inode->i_size)
        i_size_write(inode, pos);

out:
    unlock_page(page);
    put_page(page);

    return nwritten;
}
0349 
/*
 * Note: simple_write_begin does not read the page from disk on partial
 * writes. This is ok since vboxsf_write_end only writes the written parts
 * of the page and it does not call SetPageUptodate for partial writes.
 */
const struct address_space_operations vboxsf_reg_aops = {
    .read_folio = vboxsf_read_folio,
    .writepage = vboxsf_writepage,
    .dirty_folio = filemap_dirty_folio,
    .write_begin = simple_write_begin,
    .write_end = vboxsf_write_end,
};
0362 
0363 static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
0364                    struct delayed_call *done)
0365 {
0366     struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
0367     struct shfl_string *path;
0368     char *link;
0369     int err;
0370 
0371     if (!dentry)
0372         return ERR_PTR(-ECHILD);
0373 
0374     path = vboxsf_path_from_dentry(sbi, dentry);
0375     if (IS_ERR(path))
0376         return ERR_CAST(path);
0377 
0378     link = kzalloc(PATH_MAX, GFP_KERNEL);
0379     if (!link) {
0380         __putname(path);
0381         return ERR_PTR(-ENOMEM);
0382     }
0383 
0384     err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
0385     __putname(path);
0386     if (err) {
0387         kfree(link);
0388         return ERR_PTR(err);
0389     }
0390 
0391     set_delayed_call(done, kfree_link, link);
0392     return link;
0393 }
0394 
/* Inode operations for symlinks. */
const struct inode_operations vboxsf_lnk_iops = {
    .get_link = vboxsf_get_link
};