// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)        \
    ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +  \
     (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
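/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and ARM's SHMLBA
 * of 4 * PAGE_SIZE (16 KiB): COLOUR_ALIGN(0x10123, 5) rounds 0x10123 up to
 * the next SHMLBA boundary, 0x14000, and adds the colour of the file offset,
 * (5 << 12) & 0x3fff == 0x1000, giving 0x15000.  The result minus the file
 * offset, 0x15000 - 0x5000 == 0x10000, is a multiple of SHMLBA, so each page
 * of the object gets a virtual colour determined only by its offset.
 */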
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    int do_align = 0;
    int aliasing = cache_is_vipt_aliasing();
    struct vm_unmapped_area_info info;

    /*
     * We only need to do colour alignment if either the I or D
     * caches alias.
     */
    if (aliasing)
        do_align = filp || (flags & MAP_SHARED);
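    /*
     * (File-backed mappings can share page cache pages with other tasks,
     *  and MAP_SHARED anonymous mappings can likewise be visible at several
     *  virtual addresses, so both classes need consistent colouring.)
     */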

    /*
     * We enforce the MAP_FIXED case.
     */
    if (flags & MAP_FIXED) {
        if (aliasing && flags & MAP_SHARED &&
            (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
            return -EINVAL;
        return addr;
    }

    if (len > TASK_SIZE)
        return -ENOMEM;

    if (addr) {
        if (do_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            return addr;
    }

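    /*
     * Let vm_unmapped_area() search bottom-up between mmap_base and
     * TASK_SIZE.  When do_align is set, align_mask keeps only the colour
     * bits of SHMLBA (those above PAGE_SHIFT) and align_offset is the file
     * offset in bytes, so the returned address shares the colour of the
     * file offset and addr - (pgoff << PAGE_SHIFT) is SHMLBA aligned.
     */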
    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
            const unsigned long len, const unsigned long pgoff,
            const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    int do_align = 0;
    int aliasing = cache_is_vipt_aliasing();
    struct vm_unmapped_area_info info;

    /*
     * We only need to do colour alignment if either the I or D
     * caches alias.
     */
    if (aliasing)
        do_align = filp || (flags & MAP_SHARED);

    /* requested length too big for entire address space */
    if (len > TASK_SIZE)
        return -ENOMEM;

    if (flags & MAP_FIXED) {
        if (aliasing && flags & MAP_SHARED &&
            (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
            return -EINVAL;
        return addr;
    }

    /* requesting a specific address */
    if (addr) {
        if (do_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
                (!vma || addr + len <= vm_start_gap(vma)))
            return addr;
    }

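    /*
     * Search top-down from mmap_base towards FIRST_USER_ADDRESS, with the
     * same colour constraints as the bottom-up case above.
     */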
    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = FIRST_USER_ADDRESS;
    info.high_limit = mm->mmap_base;
    info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
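    /*
     * vm_unmapped_area() returns a page-aligned address on success, so a
     * value with low bits set can only be an error code such as -ENOMEM.
     */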
    if (addr & ~PAGE_MASK) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        addr = vm_unmapped_area(&info);
    }

    return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
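/*
 * Only physical addresses inside kernel-managed RAM are accepted: at or
 * above PHYS_OFFSET (the start of RAM) and below the physical address
 * corresponding to high_memory (the top of directly-mapped memory).
 */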
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
    if (addr < PHYS_OFFSET)
        return 0;
    if (addr + size > __pa(high_memory - 1) + 1)
        return 0;

    return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
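/*
 * PHYS_MASK covers the physical address bits the page tables can express
 * (typically 32 bits, or more with LPAE), so the last page of the mapping
 * must not lie beyond the last addressable page frame.
 */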
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
    return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}