// SPDX-License-Identifier: GPL-2.0-or-later
/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct user_namespace *, struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                           unsigned long addr,
                           unsigned long len,
                           unsigned long pgoff,
                           unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);

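/*
 * advertise what the no-MMU mmap core may do with ramfs files: the backing
 * pages can be mapped directly (shared), copied for private mappings, and
 * given read, write and execute access
 */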
static unsigned ramfs_mmap_capabilities(struct file *file)
{
    return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
        NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

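/*
 * reads and writes go through the generic iter-based paths; only the mmap
 * side needs ramfs-specific handling on !MMU
 */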
const struct file_operations ramfs_file_operations = {
    .mmap_capabilities  = ramfs_mmap_capabilities,
    .mmap           = ramfs_nommu_mmap,
    .get_unmapped_area  = ramfs_nommu_get_unmapped_area,
    .read_iter      = generic_file_read_iter,
    .write_iter     = generic_file_write_iter,
    .fsync          = noop_fsync,
    .splice_read        = generic_file_splice_read,
    .splice_write       = iter_file_splice_write,
    .llseek         = generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
    .setattr        = ramfs_nommu_setattr,
    .getattr        = simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
    unsigned long npages, xpages, loop;
    struct page *pages;
    unsigned order;
    void *data;
    int ret;
    gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

    /* make various checks */
    order = get_order(newsize);
    if (unlikely(order >= MAX_ORDER))
        return -EFBIG;

    ret = inode_newsize_ok(inode, newsize);
    if (ret)
        return ret;

    i_size_write(inode, newsize);

    /* allocate enough contiguous pages to be able to satisfy the
     * request */
    pages = alloc_pages(gfp, order);
    if (!pages)
        return -ENOMEM;

    /* split the high-order page into an array of single pages */
    xpages = 1UL << order;
    npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

    split_page(pages, order);

    /* trim off any pages we don't actually require */
    for (loop = npages; loop < xpages; loop++)
        __free_page(pages + loop);

    /* clear the memory we allocated */
    newsize = PAGE_SIZE * npages;
    data = page_address(pages);
    memset(data, 0, newsize);

    /* attach all the pages to the inode's address space */
    for (loop = 0; loop < npages; loop++) {
        struct page *page = pages + loop;

        ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
                    gfp);
        if (ret < 0)
            goto add_error;

        /* prevent the page from being discarded on memory pressure */
        SetPageDirty(page);
        SetPageUptodate(page);

        unlock_page(page);
        put_page(page);
    }

    return 0;

add_error:
    while (loop < npages)
        __free_page(pages + loop++);
    return ret;
}
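
/*
 * Illustrative sketch, not part of the original file: on a !MMU system,
 * userspace typically obtains a ramfs-backed shared-memory region by
 * truncating a freshly created file up from size zero and then mapping it
 * MAP_SHARED; the ftruncate() reaches ramfs_nommu_setattr() and, via
 * ramfs_nommu_resize(), ends up in ramfs_nommu_expand_for_mapping() above.
 * The mount point, file name and size are hypothetical, and the snippet
 * assumes <fcntl.h>, <unistd.h> and <sys/mman.h>:
 *
 *    int fd = open("/mnt/ramfs/shmbuf", O_RDWR | O_CREAT, 0600);
 *    ftruncate(fd, 65536);    // allocates 16 contiguous pages (4KiB pages)
 *    void *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *    // p points straight at the pages attached to the inode above
 */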

/*****************************************************************************/
/*
 * resize a ramfs inode, either expanding it from nothing for a shared mmap
 * or adjusting the size of an already-populated file
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
    int ret;

    /* assume a truncate from zero size is going to be for the purposes of
     * shared mmap */
    if (size == 0) {
        if (unlikely(newsize >> 32))
            return -EFBIG;

        return ramfs_nommu_expand_for_mapping(inode, newsize);
    }

    /* check that a decrease in size doesn't cut off any shared mappings */
    if (newsize < size) {
        ret = nommu_shrink_inode_mappings(inode, size, newsize);
        if (ret < 0)
            return ret;
    }

    truncate_setsize(inode, newsize);
    return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct user_namespace *mnt_userns,
                   struct dentry *dentry, struct iattr *ia)
{
    struct inode *inode = d_inode(dentry);
    unsigned int old_ia_valid = ia->ia_valid;
    int ret = 0;

    /* POSIX UID/GID verification for setting inode attributes */
    ret = setattr_prepare(&init_user_ns, dentry, ia);
    if (ret)
        return ret;

    /* pick out size-changing events */
    if (ia->ia_valid & ATTR_SIZE) {
        loff_t size = inode->i_size;

        if (ia->ia_size != size) {
            ret = ramfs_nommu_resize(inode, ia->ia_size, size);
            if (ret < 0 || ia->ia_valid == ATTR_SIZE)
                goto out;
        } else {
            /* we skipped the truncate but must still update
             * timestamps
             */
            ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
        }
    }

    setattr_copy(&init_user_ns, inode, ia);
 out:
    ia->ia_valid = old_ia_valid;
    return ret;
}
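
/*
 * note: size changes arrive here from notify_change() with ATTR_SIZE set,
 * e.g. on behalf of truncate() and ftruncate(), making the function above
 * the entry point for the shared-mmap expansion path
 */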

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                        unsigned long addr, unsigned long len,
                        unsigned long pgoff, unsigned long flags)
{
    unsigned long maxpages, lpages, nr, loop, ret;
    struct inode *inode = file_inode(file);
    struct page **pages = NULL, **ptr, *page;
    loff_t isize;

    /* the mapping mustn't extend beyond the EOF */
    lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    isize = i_size_read(inode);

    ret = -ENOSYS;
    maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
    if (pgoff >= maxpages)
        goto out;

    if (maxpages - pgoff < lpages)
        goto out;

    /* gang-find the pages */
    pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
    if (!pages)
        goto out_free;

    nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
    if (nr != lpages)
        goto out_free_pages; /* leave if some pages were missing */

    /* check the pages for physical adjacency */
    ptr = pages;
    page = *ptr++;
    page++;
    for (loop = lpages; loop > 1; loop--)
        if (*ptr++ != page++)
            goto out_free_pages;

    /* okay - all conditions fulfilled */
    ret = (unsigned long) page_address(pages[0]);

out_free_pages:
    ptr = pages;
    for (loop = nr; loop > 0; loop--)
        put_page(*ptr++);
out_free:
    kfree(pages);
out:
    return ret;
}
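
/*
 * on !MMU the address returned above becomes the base of the mapping, so a
 * MAP_SHARED caller shares the inode's pages in place; an -ENOSYS return
 * tells the mmap core that a direct mapping can't be made (it may then fall
 * back to a private copy where NOMMU_MAP_COPY permits)
 */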

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
    if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
        return -ENOSYS;

    file_accessed(file);
    vma->vm_ops = &generic_file_vm_ops;
    return 0;
}
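
/*
 * rejecting non-shared mappings with -ENOSYS here leaves the no-MMU mmap
 * core to satisfy MAP_PRIVATE requests by copying the file contents into
 * anonymous memory instead (as permitted by NOMMU_MAP_COPY above)
 */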