// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

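/*
 * Maximum random offset that may be added on top of the stack when
 * address space randomization is enabled for the current task.
 */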
static unsigned long stack_maxrandom_size(void)
{
    if (!(current->flags & PF_RANDOMIZE))
        return 0;
    return STACK_RND_MASK << PAGE_SHIFT;
}

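/*
 * Use the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT personality
 * bit is set, the stack may grow without limit, or the legacy_va_layout
 * sysctl is enabled.
 */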
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
    if (current->personality & ADDR_COMPAT_LAYOUT)
        return 1;
    if (rlim_stack->rlim_cur == RLIM_INFINITY)
        return 1;
    return sysctl_legacy_va_layout;
}

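/* Random, page-aligned offset used to randomize the mmap base. */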
unsigned long arch_mmap_rnd(void)
{
    return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

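/* Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus the random offset. */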
static unsigned long mmap_base_legacy(unsigned long rnd)
{
    return TASK_UNMAPPED_BASE + rnd;
}

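/*
 * Top-down layout: place the mmap base below the stack, reserving room
 * for the stack rlimit, the guard gap and stack randomization, while
 * clamping the gap between roughly 128 MB and 5/6 of STACK_TOP.
 */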
static inline unsigned long mmap_base(unsigned long rnd,
                      struct rlimit *rlim_stack)
{
    unsigned long gap = rlim_stack->rlim_cur;
    unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
    unsigned long gap_min, gap_max;

    /* Values close to RLIM_INFINITY can overflow. */
    if (gap + pad > gap)
        gap += pad;

    /*
     * Top of mmap area (just below the process stack).
     * Leave at least a ~128 MB hole.
     */
    gap_min = SZ_128M;
    gap_max = (STACK_TOP / 6) * 5;

    if (gap < gap_min)
        gap = gap_min;
    else if (gap > gap_max)
        gap = gap_max;

    return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

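/*
 * Bottom-up search for a free area between mm->mmap_base and TASK_SIZE.
 * File-backed and shared mappings are aligned according to MMAP_ALIGN_MASK;
 * the resulting range is finally checked by check_asce_limit().
 */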
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                     unsigned long len, unsigned long pgoff,
                     unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;

    if (len > TASK_SIZE - mmap_min_addr)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        goto check_asce_limit;

    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            goto check_asce_limit;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    if (filp || (flags & MAP_SHARED))
        info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
    else
        info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);
    if (offset_in_page(addr))
        return addr;

check_asce_limit:
    return check_asce_limit(mm, addr, len);
}

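/*
 * Top-down search for a free area below mm->mmap_base, falling back to a
 * bottom-up search above TASK_UNMAPPED_BASE if the top-down search fails.
 */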
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                         unsigned long len, unsigned long pgoff,
                         unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    struct vm_unmapped_area_info info;

    /* requested length too big for entire address space */
    if (len > TASK_SIZE - mmap_min_addr)
        return -ENOMEM;

    if (flags & MAP_FIXED)
        goto check_asce_limit;

    /* requesting a specific address */
    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                (!vma || addr + len <= vm_start_gap(vma)))
            goto check_asce_limit;
    }

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = max(PAGE_SIZE, mmap_min_addr);
    info.high_limit = mm->mmap_base;
    if (filp || (flags & MAP_SHARED))
        info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
    else
        info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    addr = vm_unmapped_area(&info);

    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    if (offset_in_page(addr)) {
        VM_BUG_ON(addr != -ENOMEM);
        info.flags = 0;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        addr = vm_unmapped_area(&info);
        if (offset_in_page(addr))
            return addr;
    }

check_asce_limit:
    return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
    unsigned long random_factor = 0UL;

    if (current->flags & PF_RANDOMIZE)
        random_factor = arch_mmap_rnd();

    /*
     * Fall back to the standard layout if the personality
     * bit is set, or if the expected stack growth is unlimited:
     */
    if (mmap_is_legacy(rlim_stack)) {
        mm->mmap_base = mmap_base_legacy(random_factor);
        mm->get_unmapped_area = arch_get_unmapped_area;
    } else {
        mm->mmap_base = mmap_base(random_factor, rlim_stack);
        mm->get_unmapped_area = arch_get_unmapped_area_topdown;
    }
}

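/*
 * Translation of VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations to page
 * protections; vm_get_page_prot() is generated from this table by
 * DECLARE_VM_GET_PAGE_PROT.
 */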
static const pgprot_t protection_map[16] = {
    [VM_NONE]                                   = PAGE_NONE,
    [VM_READ]                                   = PAGE_RO,
    [VM_WRITE]                                  = PAGE_RO,
    [VM_WRITE | VM_READ]                        = PAGE_RO,
    [VM_EXEC]                                   = PAGE_RX,
    [VM_EXEC | VM_READ]                         = PAGE_RX,
    [VM_EXEC | VM_WRITE]                        = PAGE_RX,
    [VM_EXEC | VM_WRITE | VM_READ]              = PAGE_RX,
    [VM_SHARED]                                 = PAGE_NONE,
    [VM_SHARED | VM_READ]                       = PAGE_RO,
    [VM_SHARED | VM_WRITE]                      = PAGE_RW,
    [VM_SHARED | VM_WRITE | VM_READ]            = PAGE_RW,
    [VM_SHARED | VM_EXEC]                       = PAGE_RX,
    [VM_SHARED | VM_EXEC | VM_READ]             = PAGE_RX,
    [VM_SHARED | VM_EXEC | VM_WRITE]            = PAGE_RWX,
    [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]  = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT