// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
    /* handle 32- and 64-bit case with a single conditional */
    if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
        return 0;

    if (!(current->flags & PF_RANDOMIZE))
        return 0;

    return va_align.mask;
}
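
/*
 * Reading aid for the conditional above (assuming the usual
 * definitions ALIGN_VA_32 == bit 0 and ALIGN_VA_64 == bit 1):
 *
 *   mmap_is_ia32() == 1  ->  2 - 1 == 1, so ALIGN_VA_32 is tested
 *   mmap_is_ia32() == 0  ->  2 - 0 == 2, so ALIGN_VA_64 is tested
 *
 * va_align.flags stays negative on CPUs that do not need the F15h
 * alignment workaround, which is what the first half of the test
 * filters out.
 */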

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
    return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
    unsigned long align_mask = get_align_mask();

    addr = (addr + align_mask) & ~align_mask;
    return addr | get_align_bits();
}
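
/*
 * Worked example for align_vdso_addr(), assuming the F15h setup of
 * va_align.mask == 0x7000 (bits [14:12]) and a per-boot
 * va_align.bits of, say, 0x3000:
 *
 *   addr                      = 0x7f1234001000   (page aligned)
 *   (addr + 0x7000) & ~0x7000 = 0x7f1234008000   (rounded up to 32K)
 *   ... | 0x3000              = 0x7f123400b000
 *
 * Every vDSO placed this boot shares the same bits [14:12], which is
 * what avoids the I$ aliasing, while the value of those bits still
 * changes from boot to boot.
 */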

static int __init control_va_addr_alignment(char *str)
{
    /* guard against enabling this on other CPU families */
    if (va_align.flags < 0)
        return 1;

    if (*str == 0)
        return 1;

    if (!strcmp(str, "32"))
        va_align.flags = ALIGN_VA_32;
    else if (!strcmp(str, "64"))
        va_align.flags = ALIGN_VA_64;
    else if (!strcmp(str, "off"))
        va_align.flags = 0;
    else if (!strcmp(str, "on"))
        va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
    else
        pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

    return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
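
/*
 * Command-line usage of the handler above:
 *
 *   align_va_addr=32    align 32-bit mmap()s only
 *   align_va_addr=64    align 64-bit mmap()s only
 *   align_va_addr=on    align both
 *   align_va_addr=off   disable the alignment entirely
 *
 * Any other value only logs a warning and leaves va_align.flags as
 * the CPU setup code configured it.
 */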

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
        unsigned long, prot, unsigned long, flags,
        unsigned long, fd, unsigned long, off)
{
    if (off & ~PAGE_MASK)
        return -EINVAL;

    return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
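
/*
 * Example of the offset handling above, assuming 4K pages: a file
 * offset of 0x3000 passes the PAGE_MASK check and reaches
 * ksys_mmap_pgoff() as pgoff == 3, while an unaligned offset such as
 * 0x123 makes the syscall return -EINVAL before any mapping work is
 * done.
 */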

static void find_start_end(unsigned long addr, unsigned long flags,
        unsigned long *begin, unsigned long *end)
{
    if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
        /* This is usually used to map code in the small code
           model, so it needs to be in the first 31 bits. Limit
           it to that. This means we need to move the unmapped
           base down for this case. This can conflict with the
           heap, but we assume that glibc malloc knows how to
           fall back to mmap. Give it 1GB of playground for
           now. -AK */
        *begin = 0x40000000;
        *end = 0x80000000;
        if (current->flags & PF_RANDOMIZE) {
            *begin = randomize_page(*begin, 0x02000000);
        }
        return;
    }

    *begin = get_mmap_base(1);
    if (in_32bit_syscall())
        *end = task_size_32bit();
    else
        *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
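
/*
 * Illustration of the MAP_32BIT branch above: a 64-bit task gets its
 * search window limited to [0x40000000, 0x80000000), i.e. the 1GB
 * "playground" below the 2GB mark, and with PF_RANDOMIZE set the
 * start of that window is additionally shifted up by a random,
 * page-aligned amount of less than 32MB (0x02000000).
 */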

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    struct vm_unmapped_area_info info;
    unsigned long begin, end;

    if (flags & MAP_FIXED)
        return addr;

    find_start_end(addr, flags, &begin, &end);

    if (len > end)
        return -ENOMEM;

    if (addr) {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(mm, addr);
        if (end - len >= addr &&
            (!vma || addr + len <= vm_start_gap(vma)))
            return addr;
    }

    info.flags = 0;
    info.length = len;
    info.low_limit = begin;
    info.high_limit = end;
    info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    if (filp) {
        info.align_mask = get_align_mask();
        info.align_offset += get_align_bits();
    }
    return vm_unmapped_area(&info);
}
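
/*
 * Note on the align_mask/align_offset pair above (a summary of how
 * vm_unmapped_area() is being used here, not a formal contract): the
 * returned address is chosen so that its bits under align_mask match
 * align_offset. Seeding align_offset with pgoff << PAGE_SHIFT keeps a
 * file mapping's low address bits in step with its file offset, and
 * get_align_bits() folds the per-boot random bits in on top for the
 * AMD F15h case.
 */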

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
              const unsigned long len, const unsigned long pgoff,
              const unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    unsigned long addr = addr0;
    struct vm_unmapped_area_info info;

    /* requested length too big for entire address space */
    if (len > TASK_SIZE)
        return -ENOMEM;

    /* No address checking. See comment at mmap_address_hint_valid() */
    if (flags & MAP_FIXED)
        return addr;

    /* for MAP_32BIT mappings we force the legacy mmap base */
    if (!in_32bit_syscall() && (flags & MAP_32BIT))
        goto bottomup;

    /* requesting a specific address */
    if (addr) {
        addr &= PAGE_MASK;
        if (!mmap_address_hint_valid(addr, len))
            goto get_unmapped_area;

        vma = find_vma(mm, addr);
        if (!vma || addr + len <= vm_start_gap(vma))
            return addr;
    }
get_unmapped_area:

    info.flags = VM_UNMAPPED_AREA_TOPDOWN;
    info.length = len;
    info.low_limit = PAGE_SIZE;
    info.high_limit = get_mmap_base(0);

    /*
     * If the hint address is above DEFAULT_MAP_WINDOW, look for an
     * unmapped area in the full address space.
     *
     * The !in_32bit_syscall() check avoids handing out high addresses
     * to x32 tasks (and makes this a no-op on native i386).
     */
    if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
        info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
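
    /*
     * Example, assuming the usual 4-level/5-level limits of 47 and 56
     * bits: a hint such as (1UL << 48) is above DEFAULT_MAP_WINDOW, so
     * high_limit grows to cover the space up to TASK_SIZE_MAX, while a
     * task passing no hint (or a low one) keeps receiving addresses
     * below DEFAULT_MAP_WINDOW even on 5-level-paging hardware.
     */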

    info.align_mask = 0;
    info.align_offset = pgoff << PAGE_SHIFT;
    if (filp) {
        info.align_mask = get_align_mask();
        info.align_offset += get_align_bits();
    }
    addr = vm_unmapped_area(&info);
    if (!(addr & ~PAGE_MASK))
        return addr;
    VM_BUG_ON(addr != -ENOMEM);

bottomup:
    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}