// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/admin-guide/mm/nommu-mmap.rst
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

0017 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0018
0019 #include <linux/export.h>
0020 #include <linux/mm.h>
0021 #include <linux/sched/mm.h>
0022 #include <linux/vmacache.h>
0023 #include <linux/mman.h>
0024 #include <linux/swap.h>
0025 #include <linux/file.h>
0026 #include <linux/highmem.h>
0027 #include <linux/pagemap.h>
0028 #include <linux/slab.h>
0029 #include <linux/vmalloc.h>
0030 #include <linux/backing-dev.h>
0031 #include <linux/compiler.h>
0032 #include <linux/mount.h>
0033 #include <linux/personality.h>
0034 #include <linux/security.h>
0035 #include <linux/syscalls.h>
0036 #include <linux/audit.h>
0037 #include <linux/printk.h>
0038
0039 #include <linux/uaccess.h>
0040 #include <asm/tlb.h>
0041 #include <asm/tlbflush.h>
0042 #include <asm/mmu_context.h>
0043 #include "internal.h"
0044
0045 void *high_memory;
0046 EXPORT_SYMBOL(high_memory);
0047 struct page *mem_map;
0048 unsigned long max_mapnr;
0049 EXPORT_SYMBOL(max_mapnr);
0050 unsigned long highest_memmap_pfn;
0051 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
0052 int heap_stack_gap = 0;
0053
0054 atomic_long_t mmap_pages_allocated;
0055
0056 EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
0059 static struct kmem_cache *vm_region_jar;
0060 struct rb_root nommu_region_tree = RB_ROOT;
0061 DECLARE_RWSEM(nommu_region_sem);
0062
0063 const struct vm_operations_struct generic_file_vm_ops = {
0064 };

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
0072 unsigned int kobjsize(const void *objp)
0073 {
0074 struct page *page;
0075
0076
0077
0078
0079
0080 if (!objp || !virt_addr_valid(objp))
0081 return 0;
0082
0083 page = virt_to_head_page(objp);
0084
0085
0086
0087
0088
0089 if (PageSlab(page))
0090 return ksize(objp);
0091
0092
0093
0094
0095
0096
0097
0098 if (!PageCompound(page)) {
0099 struct vm_area_struct *vma;
0100
0101 vma = find_vma(current->mm, (unsigned long)objp);
0102 if (vma)
0103 return vma->vm_end - vma->vm_start;
0104 }
0105
0106
0107
0108
0109
0110 return page_size(page);
0111 }

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
0123 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
0124 unsigned long *pfn)
0125 {
0126 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
0127 return -EINVAL;
0128
0129 *pfn = address >> PAGE_SHIFT;
0130 return 0;
0131 }
0132 EXPORT_SYMBOL(follow_pfn);
0133
0134 LIST_HEAD(vmap_area_list);
0135
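/*
 * Under NOMMU, "vmalloc" memory is really obtained from kmalloc() (see
 * __vmalloc() below), so releasing it is just a kfree().
 */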
0136 void vfree(const void *addr)
0137 {
0138 kfree(addr);
0139 }
0140 EXPORT_SYMBOL(vfree);
0141
0142 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
0143 {
	/*
	 * kmalloc() only returns logical (directly mapped) addresses, so
	 * __GFP_HIGHMEM must not be passed down to it.
	 */
0148 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
0149 }
0150 EXPORT_SYMBOL(__vmalloc);
0151
0152 void *__vmalloc_node_range(unsigned long size, unsigned long align,
0153 unsigned long start, unsigned long end, gfp_t gfp_mask,
0154 pgprot_t prot, unsigned long vm_flags, int node,
0155 const void *caller)
0156 {
0157 return __vmalloc(size, gfp_mask);
0158 }
0159
0160 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
0161 int node, const void *caller)
0162 {
0163 return __vmalloc(size, gfp_mask);
0164 }
0165
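/*
 * Allocate a buffer and mark the VMA that covers it with VM_USERMAP so that
 * the buffer may later be mapped into userspace via remap_vmalloc_range().
 */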
0166 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
0167 {
0168 void *ret;
0169
0170 ret = __vmalloc(size, flags);
0171 if (ret) {
0172 struct vm_area_struct *vma;
0173
0174 mmap_write_lock(current->mm);
0175 vma = find_vma(current->mm, (unsigned long)ret);
0176 if (vma)
0177 vma->vm_flags |= VM_USERMAP;
0178 mmap_write_unlock(current->mm);
0179 }
0180
0181 return ret;
0182 }
0183
0184 void *vmalloc_user(unsigned long size)
0185 {
0186 return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
0187 }
0188 EXPORT_SYMBOL(vmalloc_user);
0189
0190 struct page *vmalloc_to_page(const void *addr)
0191 {
0192 return virt_to_page(addr);
0193 }
0194 EXPORT_SYMBOL(vmalloc_to_page);
0195
0196 unsigned long vmalloc_to_pfn(const void *addr)
0197 {
0198 return page_to_pfn(virt_to_page(addr));
0199 }
0200 EXPORT_SYMBOL(vmalloc_to_pfn);
0201
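/*
 * Read from "vmalloc" memory: with no MMU the address is directly usable,
 * so this degenerates to a memcpy() with an overflow check on the buffer.
 */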
0202 long vread(char *buf, char *addr, unsigned long count)
0203 {
0204
0205 if ((unsigned long) buf + count < count)
0206 count = -(unsigned long) buf;
0207
0208 memcpy(buf, addr, count);
0209 return count;
0210 }

/*
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
0223 void *vmalloc(unsigned long size)
0224 {
0225 return __vmalloc(size, GFP_KERNEL);
0226 }
0227 EXPORT_SYMBOL(vmalloc);
0228
0229 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);

/*
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
0243 void *vzalloc(unsigned long size)
0244 {
0245 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
0246 }
0247 EXPORT_SYMBOL(vzalloc);

/*
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
0260 void *vmalloc_node(unsigned long size, int node)
0261 {
0262 return vmalloc(size);
0263 }
0264 EXPORT_SYMBOL(vmalloc_node);

/*
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
0278 void *vzalloc_node(unsigned long size, int node)
0279 {
0280 return vzalloc(size);
0281 }
0282 EXPORT_SYMBOL(vzalloc_node);

/*
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
0291 void *vmalloc_32(unsigned long size)
0292 {
0293 return __vmalloc(size, GFP_KERNEL);
0294 }
0295 EXPORT_SYMBOL(vmalloc_32);

/*
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the VMA which covers the buffer so that it can be
 * remapped later with remap_vmalloc_range().
 */
0307 void *vmalloc_32_user(unsigned long size)
0308 {
0309
0310
0311
0312
0313 return vmalloc_user(size);
0314 }
0315 EXPORT_SYMBOL(vmalloc_32_user);
0316
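/*
 * Page array mapping (vmap/vm_map_ram and friends) cannot be implemented
 * without an MMU, so any caller reaching these stubs is a bug.
 */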
0317 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
0318 {
0319 BUG();
0320 return NULL;
0321 }
0322 EXPORT_SYMBOL(vmap);
0323
0324 void vunmap(const void *addr)
0325 {
0326 BUG();
0327 }
0328 EXPORT_SYMBOL(vunmap);
0329
0330 void *vm_map_ram(struct page **pages, unsigned int count, int node)
0331 {
0332 BUG();
0333 return NULL;
0334 }
0335 EXPORT_SYMBOL(vm_map_ram);
0336
0337 void vm_unmap_ram(const void *mem, unsigned int count)
0338 {
0339 BUG();
0340 }
0341 EXPORT_SYMBOL(vm_unmap_ram);
0342
0343 void vm_unmap_aliases(void)
0344 {
0345 }
0346 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
0347
0348 void free_vm_area(struct vm_struct *area)
0349 {
0350 BUG();
0351 }
0352 EXPORT_SYMBOL_GPL(free_vm_area);
0353
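/*
 * Inserting individual pages or page arrays into a VMA is not possible
 * without page tables, so these always fail with -EINVAL.
 */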
0354 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
0355 struct page *page)
0356 {
0357 return -EINVAL;
0358 }
0359 EXPORT_SYMBOL(vm_insert_page);
0360
0361 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
0362 unsigned long num)
0363 {
0364 return -EINVAL;
0365 }
0366 EXPORT_SYMBOL(vm_map_pages);
0367
0368 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
0369 unsigned long num)
0370 {
0371 return -EINVAL;
0372 }
0373 EXPORT_SYMBOL(vm_map_pages_zero);

/*
 * brk() for NOMMU: the brk area is set up at load time and may only move
 * within [mm->start_brk, mm->context.end_brk]; shrinking is a simple
 * assignment and growing just requires the icache to be flushed over the
 * newly exposed range.
 */
0382 SYSCALL_DEFINE1(brk, unsigned long, brk)
0383 {
0384 struct mm_struct *mm = current->mm;
0385
0386 if (brk < mm->start_brk || brk > mm->context.end_brk)
0387 return mm->brk;
0388
0389 if (mm->brk == brk)
0390 return mm->brk;
0391
0392
0393
0394
0395 if (brk <= mm->brk) {
0396 mm->brk = brk;
0397 return brk;
0398 }
0399
0400
0401
0402
0403 flush_icache_user_range(mm->brk, brk);
0404 return mm->brk = brk;
0405 }

/*
 * initialise the vm_committed_as percpu counter and the VM region record
 * slab cache
 */
0410 void __init mmap_init(void)
0411 {
0412 int ret;
0413
0414 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
0415 VM_BUG_ON(ret);
0416 vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
0417 }

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
0423 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
0424 static noinline void validate_nommu_regions(void)
0425 {
0426 struct vm_region *region, *last;
0427 struct rb_node *p, *lastp;
0428
0429 lastp = rb_first(&nommu_region_tree);
0430 if (!lastp)
0431 return;
0432
0433 last = rb_entry(lastp, struct vm_region, vm_rb);
0434 BUG_ON(last->vm_end <= last->vm_start);
0435 BUG_ON(last->vm_top < last->vm_end);
0436
0437 while ((p = rb_next(lastp))) {
0438 region = rb_entry(p, struct vm_region, vm_rb);
0439 last = rb_entry(lastp, struct vm_region, vm_rb);
0440
0441 BUG_ON(region->vm_end <= region->vm_start);
0442 BUG_ON(region->vm_top < region->vm_end);
0443 BUG_ON(region->vm_start < last->vm_top);
0444
0445 lastp = p;
0446 }
0447 }
0448 #else
0449 static void validate_nommu_regions(void)
0450 {
0451 }
0452 #endif

/*
 * add a region into the global tree
 */
0457 static void add_nommu_region(struct vm_region *region)
0458 {
0459 struct vm_region *pregion;
0460 struct rb_node **p, *parent;
0461
0462 validate_nommu_regions();
0463
0464 parent = NULL;
0465 p = &nommu_region_tree.rb_node;
0466 while (*p) {
0467 parent = *p;
0468 pregion = rb_entry(parent, struct vm_region, vm_rb);
0469 if (region->vm_start < pregion->vm_start)
0470 p = &(*p)->rb_left;
0471 else if (region->vm_start > pregion->vm_start)
0472 p = &(*p)->rb_right;
0473 else if (pregion == region)
0474 return;
0475 else
0476 BUG();
0477 }
0478
	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);
0481
0482 validate_nommu_regions();
0483 }

/*
 * delete a region from the global tree
 */
0488 static void delete_nommu_region(struct vm_region *region)
0489 {
0490 BUG_ON(!nommu_region_tree.rb_node);
0491
0492 validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
0494 validate_nommu_regions();
0495 }

/*
 * free a contiguous series of pages
 */
0500 static void free_page_series(unsigned long from, unsigned long to)
0501 {
0502 for (; from < to; from += PAGE_SIZE) {
0503 struct page *page = virt_to_page((void *)from);
0504
0505 atomic_long_dec(&mmap_pages_allocated);
0506 put_page(page);
0507 }
0508 }

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this
 *   releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
0516 static void __put_nommu_region(struct vm_region *region)
0517 __releases(nommu_region_sem)
0518 {
0519 BUG_ON(!nommu_region_tree.rb_node);
0520
0521 if (--region->vm_usage == 0) {
0522 if (region->vm_top > region->vm_start)
0523 delete_nommu_region(region);
0524 up_write(&nommu_region_sem);
0525
0526 if (region->vm_file)
0527 fput(region->vm_file);
0528
0529
0530
0531 if (region->vm_flags & VM_MAPPED_COPY)
0532 free_page_series(region->vm_start, region->vm_top);
0533 kmem_cache_free(vm_region_jar, region);
0534 } else {
0535 up_write(&nommu_region_sem);
0536 }
0537 }

/*
 * release a reference to a region
 */
0542 static void put_nommu_region(struct vm_region *region)
0543 {
0544 down_write(&nommu_region_sem);
0545 __put_nommu_region(region);
0546 }

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_lock held writelocked
 */
0554 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
0555 {
0556 struct vm_area_struct *pvma, *prev;
0557 struct address_space *mapping;
0558 struct rb_node **p, *parent, *rb_prev;
0559
0560 BUG_ON(!vma->vm_region);
0561
0562 mm->map_count++;
0563 vma->vm_mm = mm;
0564
0565
0566 if (vma->vm_file) {
0567 mapping = vma->vm_file->f_mapping;
0568
0569 i_mmap_lock_write(mapping);
0570 flush_dcache_mmap_lock(mapping);
0571 vma_interval_tree_insert(vma, &mapping->i_mmap);
0572 flush_dcache_mmap_unlock(mapping);
0573 i_mmap_unlock_write(mapping);
0574 }
0575
0576
0577 parent = rb_prev = NULL;
0578 p = &mm->mm_rb.rb_node;
0579 while (*p) {
0580 parent = *p;
0581 pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
0582
0583
0584
0585 if (vma->vm_start < pvma->vm_start)
0586 p = &(*p)->rb_left;
0587 else if (vma->vm_start > pvma->vm_start) {
0588 rb_prev = parent;
0589 p = &(*p)->rb_right;
0590 } else if (vma->vm_end < pvma->vm_end)
0591 p = &(*p)->rb_left;
0592 else if (vma->vm_end > pvma->vm_end) {
0593 rb_prev = parent;
0594 p = &(*p)->rb_right;
0595 } else if (vma < pvma)
0596 p = &(*p)->rb_left;
0597 else if (vma > pvma) {
0598 rb_prev = parent;
0599 p = &(*p)->rb_right;
0600 } else
0601 BUG();
0602 }
0603
0604 rb_link_node(&vma->vm_rb, parent, p);
0605 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
0606
0607
0608 prev = NULL;
0609 if (rb_prev)
0610 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
0611
0612 __vma_link_list(mm, vma, prev);
0613 }

/*
 * delete a VMA from its owning mm_struct and address space
 */
0618 static void delete_vma_from_mm(struct vm_area_struct *vma)
0619 {
0620 int i;
0621 struct address_space *mapping;
0622 struct mm_struct *mm = vma->vm_mm;
0623 struct task_struct *curr = current;
0624
0625 mm->map_count--;
0626 for (i = 0; i < VMACACHE_SIZE; i++) {
0627
0628 if (curr->vmacache.vmas[i] == vma) {
0629 vmacache_invalidate(mm);
0630 break;
0631 }
0632 }
0633
0634
0635 if (vma->vm_file) {
0636 mapping = vma->vm_file->f_mapping;
0637
0638 i_mmap_lock_write(mapping);
0639 flush_dcache_mmap_lock(mapping);
0640 vma_interval_tree_remove(vma, &mapping->i_mmap);
0641 flush_dcache_mmap_unlock(mapping);
0642 i_mmap_unlock_write(mapping);
0643 }
0644
0645
0646 rb_erase(&vma->vm_rb, &mm->mm_rb);
0647
0648 __vma_unlink_list(mm, vma);
0649 }

/*
 * destroy a VMA record
 */
0654 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
0655 {
0656 if (vma->vm_ops && vma->vm_ops->close)
0657 vma->vm_ops->close(vma);
0658 if (vma->vm_file)
0659 fput(vma->vm_file);
0660 put_nommu_region(vma->vm_region);
0661 vm_area_free(vma);
0662 }

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_lock at least held readlocked
 */
0668 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
0669 {
0670 struct vm_area_struct *vma;
0671
0672
0673 vma = vmacache_find(mm, addr);
0674 if (likely(vma))
0675 return vma;
0676
0677
0678
0679 for (vma = mm->mmap; vma; vma = vma->vm_next) {
0680 if (vma->vm_start > addr)
0681 return NULL;
0682 if (vma->vm_end > addr) {
0683 vmacache_update(addr, vma);
0684 return vma;
0685 }
0686 }
0687
0688 return NULL;
0689 }
0690 EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
0696 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
0697 {
0698 return find_vma(mm, addr);
0699 }

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
0705 int expand_stack(struct vm_area_struct *vma, unsigned long address)
0706 {
0707 return -ENOMEM;
0708 }

/*
 * look up the first VMA exactly matching the interval [addr, addr + len)
 * - should be called with mm->mmap_lock at least held readlocked
 */
0714 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
0715 unsigned long addr,
0716 unsigned long len)
0717 {
0718 struct vm_area_struct *vma;
0719 unsigned long end = addr + len;
0720
0721
0722 vma = vmacache_find_exact(mm, addr, end);
0723 if (vma)
0724 return vma;
0725
0726
0727
0728 for (vma = mm->mmap; vma; vma = vma->vm_next) {
0729 if (vma->vm_start < addr)
0730 continue;
0731 if (vma->vm_start > addr)
0732 return NULL;
0733 if (vma->vm_end == end) {
0734 vmacache_update(addr, vma);
0735 return vma;
0736 }
0737 }
0738
0739 return NULL;
0740 }

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
0746 static int validate_mmap_request(struct file *file,
0747 unsigned long addr,
0748 unsigned long len,
0749 unsigned long prot,
0750 unsigned long flags,
0751 unsigned long pgoff,
0752 unsigned long *_capabilities)
0753 {
0754 unsigned long capabilities, rlen;
0755 int ret;
0756
0757
0758 if (flags & MAP_FIXED)
0759 return -EINVAL;
0760
0761 if ((flags & MAP_TYPE) != MAP_PRIVATE &&
0762 (flags & MAP_TYPE) != MAP_SHARED)
0763 return -EINVAL;
0764
0765 if (!len)
0766 return -EINVAL;
0767
0768
0769 rlen = PAGE_ALIGN(len);
0770 if (!rlen || rlen > TASK_SIZE)
0771 return -ENOMEM;
0772
0773
0774 if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
0775 return -EOVERFLOW;
0776
0777 if (file) {
0778
0779 if (!file->f_op->mmap)
0780 return -ENODEV;
0781
0782
0783
0784
0785
0786 if (file->f_op->mmap_capabilities) {
0787 capabilities = file->f_op->mmap_capabilities(file);
0788 } else {
0789
0790
0791 switch (file_inode(file)->i_mode & S_IFMT) {
0792 case S_IFREG:
0793 case S_IFBLK:
0794 capabilities = NOMMU_MAP_COPY;
0795 break;
0796
0797 case S_IFCHR:
0798 capabilities =
0799 NOMMU_MAP_DIRECT |
0800 NOMMU_MAP_READ |
0801 NOMMU_MAP_WRITE;
0802 break;
0803
0804 default:
0805 return -EINVAL;
0806 }
0807 }
0808
0809
0810
0811 if (!file->f_op->get_unmapped_area)
0812 capabilities &= ~NOMMU_MAP_DIRECT;
0813 if (!(file->f_mode & FMODE_CAN_READ))
0814 capabilities &= ~NOMMU_MAP_COPY;
0815
0816
0817 if (!(file->f_mode & FMODE_READ))
0818 return -EACCES;
0819
0820 if (flags & MAP_SHARED) {
0821
0822 if ((prot & PROT_WRITE) &&
0823 !(file->f_mode & FMODE_WRITE))
0824 return -EACCES;
0825
0826 if (IS_APPEND(file_inode(file)) &&
0827 (file->f_mode & FMODE_WRITE))
0828 return -EACCES;
0829
0830 if (!(capabilities & NOMMU_MAP_DIRECT))
0831 return -ENODEV;
0832
0833
0834 capabilities &= ~NOMMU_MAP_COPY;
0835 } else {
0836
0837
0838 if (!(capabilities & NOMMU_MAP_COPY))
0839 return -ENODEV;
0840
0841
0842
0843 if (prot & PROT_WRITE)
0844 capabilities &= ~NOMMU_MAP_DIRECT;
0845 }
0846
0847 if (capabilities & NOMMU_MAP_DIRECT) {
0848 if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) ||
0849 ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
0850 ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC))
0851 ) {
0852 capabilities &= ~NOMMU_MAP_DIRECT;
0853 if (flags & MAP_SHARED) {
0854 pr_warn("MAP_SHARED not completely supported on !MMU\n");
0855 return -EINVAL;
0856 }
0857 }
0858 }
0859
0860
0861
0862 if (path_noexec(&file->f_path)) {
0863 if (prot & PROT_EXEC)
0864 return -EPERM;
0865 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
0866
0867 if (current->personality & READ_IMPLIES_EXEC) {
0868 if (capabilities & NOMMU_MAP_EXEC)
0869 prot |= PROT_EXEC;
0870 }
0871 } else if ((prot & PROT_READ) &&
0872 (prot & PROT_EXEC) &&
0873 !(capabilities & NOMMU_MAP_EXEC)
0874 ) {
0875
0876 capabilities &= ~NOMMU_MAP_DIRECT;
0877 }
0878 } else {
0879
0880
0881
0882 capabilities = NOMMU_MAP_COPY;
0883
0884
0885 if ((prot & PROT_READ) &&
0886 (current->personality & READ_IMPLIES_EXEC))
0887 prot |= PROT_EXEC;
0888 }
0889
0890
0891 ret = security_mmap_addr(addr);
0892 if (ret < 0)
0893 return ret;
0894
0895
0896 *_capabilities = capabilities;
0897 return 0;
0898 }

/*
 * we've determined that we can make the mapping, now translate what we
 * want into VMA flags
 */
0904 static unsigned long determine_vm_flags(struct file *file,
0905 unsigned long prot,
0906 unsigned long flags,
0907 unsigned long capabilities)
0908 {
0909 unsigned long vm_flags;
0910
0911 vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
0912
0913
0914 if (!(capabilities & NOMMU_MAP_DIRECT)) {
0915
0916 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
0917 if (file && !(prot & PROT_WRITE))
0918 vm_flags |= VM_MAYSHARE;
0919 } else {
0920
0921
0922
0923 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
0924 if (flags & MAP_SHARED)
0925 vm_flags |= VM_SHARED;
0926 }
0927
0928
0929
0930
0931
0932 if ((flags & MAP_PRIVATE) && current->ptrace)
0933 vm_flags &= ~VM_MAYSHARE;
0934
0935 return vm_flags;
0936 }

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
0942 static int do_mmap_shared_file(struct vm_area_struct *vma)
0943 {
0944 int ret;
0945
0946 ret = call_mmap(vma->vm_file, vma);
0947 if (ret == 0) {
0948 vma->vm_region->vm_top = vma->vm_region->vm_end;
0949 return 0;
0950 }
0951 if (ret != -ENOSYS)
0952 return ret;
0953
0954
0955
0956
0957 return -ENODEV;
0958 }

/*
 * set up a private mapping or an anonymous shared mapping
 */
0963 static int do_mmap_private(struct vm_area_struct *vma,
0964 struct vm_region *region,
0965 unsigned long len,
0966 unsigned long capabilities)
0967 {
0968 unsigned long total, point;
0969 void *base;
0970 int ret, order;
0971
0972
0973
0974
0975
0976 if (capabilities & NOMMU_MAP_DIRECT) {
0977 ret = call_mmap(vma->vm_file, vma);
0978 if (ret == 0) {
0979
0980 BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
0981 vma->vm_region->vm_top = vma->vm_region->vm_end;
0982 return 0;
0983 }
0984 if (ret != -ENOSYS)
0985 return ret;
0986
0987
0988
0989
0990 }
0991
0992
0993
0994
0995
0996
0997 order = get_order(len);
0998 total = 1 << order;
0999 point = len >> PAGE_SHIFT;
1000
1001
1002 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
1003 total = point;
1004
1005 base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1006 if (!base)
1007 goto enomem;
1008
1009 atomic_long_add(total, &mmap_pages_allocated);
1010
1011 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1012 region->vm_start = (unsigned long) base;
1013 region->vm_end = region->vm_start + len;
1014 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
1015
1016 vma->vm_start = region->vm_start;
1017 vma->vm_end = region->vm_start + len;
1018
1019 if (vma->vm_file) {
1020
1021 loff_t fpos;
1022
1023 fpos = vma->vm_pgoff;
1024 fpos <<= PAGE_SHIFT;
1025
1026 ret = kernel_read(vma->vm_file, base, len, &fpos);
1027 if (ret < 0)
1028 goto error_free;
1029
1030
1031 if (ret < len)
1032 memset(base + ret, 0, len - ret);
1033
1034 } else {
1035 vma_set_anonymous(vma);
1036 }
1037
1038 return 0;
1039
1040 error_free:
1041 free_page_series(region->vm_start, region->vm_top);
1042 region->vm_start = vma->vm_start = 0;
1043 region->vm_end = vma->vm_end = 0;
1044 region->vm_top = 0;
1045 return ret;
1046
1047 enomem:
1048 pr_err("Allocation of length %lu from process %d (%s) failed\n",
1049 len, current->pid, current->comm);
1050 show_free_areas(0, NULL);
1051 return -ENOMEM;
1052 }

/*
 * handle mapping creation on a NOMMU kernel
 */
1057 unsigned long do_mmap(struct file *file,
1058 unsigned long addr,
1059 unsigned long len,
1060 unsigned long prot,
1061 unsigned long flags,
1062 unsigned long pgoff,
1063 unsigned long *populate,
1064 struct list_head *uf)
1065 {
1066 struct vm_area_struct *vma;
1067 struct vm_region *region;
1068 struct rb_node *rb;
1069 vm_flags_t vm_flags;
1070 unsigned long capabilities, result;
1071 int ret;
1072
1073 *populate = 0;
1074
1075
1076
1077 ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1078 &capabilities);
1079 if (ret < 0)
1080 return ret;
1081
1082
1083 addr = 0;
1084 len = PAGE_ALIGN(len);
1085
1086
1087
1088 vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1089
1090
1091 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1092 if (!region)
1093 goto error_getting_region;
1094
1095 vma = vm_area_alloc(current->mm);
1096 if (!vma)
1097 goto error_getting_vma;
1098
1099 region->vm_usage = 1;
1100 region->vm_flags = vm_flags;
1101 region->vm_pgoff = pgoff;
1102
1103 vma->vm_flags = vm_flags;
1104 vma->vm_pgoff = pgoff;
1105
1106 if (file) {
1107 region->vm_file = get_file(file);
1108 vma->vm_file = get_file(file);
1109 }
1110
1111 down_write(&nommu_region_sem);
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121 if (vm_flags & VM_MAYSHARE) {
1122 struct vm_region *pregion;
1123 unsigned long pglen, rpglen, pgend, rpgend, start;
1124
1125 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1126 pgend = pgoff + pglen;
1127
1128 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1129 pregion = rb_entry(rb, struct vm_region, vm_rb);
1130
1131 if (!(pregion->vm_flags & VM_MAYSHARE))
1132 continue;
1133
1134
1135 if (file_inode(pregion->vm_file) !=
1136 file_inode(file))
1137 continue;
1138
1139 if (pregion->vm_pgoff >= pgend)
1140 continue;
1141
1142 rpglen = pregion->vm_end - pregion->vm_start;
1143 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1144 rpgend = pregion->vm_pgoff + rpglen;
1145 if (pgoff >= rpgend)
1146 continue;
1147
1148
1149
1150 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1151 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1152
1153 if (!(capabilities & NOMMU_MAP_DIRECT))
1154 goto sharing_violation;
1155 continue;
1156 }
1157
1158
1159 pregion->vm_usage++;
1160 vma->vm_region = pregion;
1161 start = pregion->vm_start;
1162 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1163 vma->vm_start = start;
1164 vma->vm_end = start + len;
1165
1166 if (pregion->vm_flags & VM_MAPPED_COPY)
1167 vma->vm_flags |= VM_MAPPED_COPY;
1168 else {
1169 ret = do_mmap_shared_file(vma);
1170 if (ret < 0) {
1171 vma->vm_region = NULL;
1172 vma->vm_start = 0;
1173 vma->vm_end = 0;
1174 pregion->vm_usage--;
1175 pregion = NULL;
1176 goto error_just_free;
1177 }
1178 }
1179 fput(region->vm_file);
1180 kmem_cache_free(vm_region_jar, region);
1181 region = pregion;
1182 result = start;
1183 goto share;
1184 }
1185
1186
1187
1188
1189
1190 if (capabilities & NOMMU_MAP_DIRECT) {
1191 addr = file->f_op->get_unmapped_area(file, addr, len,
1192 pgoff, flags);
1193 if (IS_ERR_VALUE(addr)) {
1194 ret = addr;
1195 if (ret != -ENOSYS)
1196 goto error_just_free;
1197
1198
1199
1200
1201 ret = -ENODEV;
1202 if (!(capabilities & NOMMU_MAP_COPY))
1203 goto error_just_free;
1204
1205 capabilities &= ~NOMMU_MAP_DIRECT;
1206 } else {
1207 vma->vm_start = region->vm_start = addr;
1208 vma->vm_end = region->vm_end = addr + len;
1209 }
1210 }
1211 }
1212
1213 vma->vm_region = region;
1214
1215
1216
1217
1218 if (file && vma->vm_flags & VM_SHARED)
1219 ret = do_mmap_shared_file(vma);
1220 else
1221 ret = do_mmap_private(vma, region, len, capabilities);
1222 if (ret < 0)
1223 goto error_just_free;
1224 add_nommu_region(region);
1225
1226
1227 if (!vma->vm_file &&
1228 (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1229 !(flags & MAP_UNINITIALIZED)))
1230 memset((void *)region->vm_start, 0,
1231 region->vm_end - region->vm_start);
1232
1233
1234 result = vma->vm_start;
1235
1236 current->mm->total_vm += len >> PAGE_SHIFT;
1237
1238 share:
1239 add_vma_to_mm(current->mm, vma);
1240
1241
1242
1243 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1244 flush_icache_user_range(region->vm_start, region->vm_end);
1245 region->vm_icache_flushed = true;
1246 }
1247
1248 up_write(&nommu_region_sem);
1249
1250 return result;
1251
1252 error_just_free:
1253 up_write(&nommu_region_sem);
1254 error:
1255 if (region->vm_file)
1256 fput(region->vm_file);
1257 kmem_cache_free(vm_region_jar, region);
1258 if (vma->vm_file)
1259 fput(vma->vm_file);
1260 vm_area_free(vma);
1261 return ret;
1262
1263 sharing_violation:
1264 up_write(&nommu_region_sem);
1265 pr_warn("Attempt to share mismatched mappings\n");
1266 ret = -EINVAL;
1267 goto error;
1268
1269 error_getting_vma:
1270 kmem_cache_free(vm_region_jar, region);
1271 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1272 len, current->pid);
1273 show_free_areas(0, NULL);
1274 return -ENOMEM;
1275
1276 error_getting_region:
1277 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1278 len, current->pid);
1279 show_free_areas(0, NULL);
1280 return -ENOMEM;
1281 }
1282
1283 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1284 unsigned long prot, unsigned long flags,
1285 unsigned long fd, unsigned long pgoff)
1286 {
1287 struct file *file = NULL;
1288 unsigned long retval = -EBADF;
1289
1290 audit_mmap_fd(fd, flags);
1291 if (!(flags & MAP_ANONYMOUS)) {
1292 file = fget(fd);
1293 if (!file)
1294 goto out;
1295 }
1296
1297 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1298
1299 if (file)
1300 fput(file);
1301 out:
1302 return retval;
1303 }
1304
1305 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1306 unsigned long, prot, unsigned long, flags,
1307 unsigned long, fd, unsigned long, pgoff)
1308 {
1309 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1310 }
1311
1312 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1313 struct mmap_arg_struct {
1314 unsigned long addr;
1315 unsigned long len;
1316 unsigned long prot;
1317 unsigned long flags;
1318 unsigned long fd;
1319 unsigned long offset;
1320 };
1321
1322 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1323 {
1324 struct mmap_arg_struct a;
1325
1326 if (copy_from_user(&a, arg, sizeof(a)))
1327 return -EFAULT;
1328 if (offset_in_page(a.offset))
1329 return -EINVAL;
1330
1331 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1332 a.offset >> PAGE_SHIFT);
1333 }
1334 #endif

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail
 */
1340 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1341 unsigned long addr, int new_below)
1342 {
1343 struct vm_area_struct *new;
1344 struct vm_region *region;
1345 unsigned long npages;
1346
1347
1348
1349 if (vma->vm_file)
1350 return -ENOMEM;
1351
1352 if (mm->map_count >= sysctl_max_map_count)
1353 return -ENOMEM;
1354
1355 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1356 if (!region)
1357 return -ENOMEM;
1358
1359 new = vm_area_dup(vma);
1360 if (!new) {
1361 kmem_cache_free(vm_region_jar, region);
1362 return -ENOMEM;
1363 }
1364
1365
1366 *region = *vma->vm_region;
1367 new->vm_region = region;
1368
1369 npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1370
1371 if (new_below) {
1372 region->vm_top = region->vm_end = new->vm_end = addr;
1373 } else {
1374 region->vm_start = new->vm_start = addr;
1375 region->vm_pgoff = new->vm_pgoff += npages;
1376 }
1377
1378 if (new->vm_ops && new->vm_ops->open)
1379 new->vm_ops->open(new);
1380
1381 delete_vma_from_mm(vma);
1382 down_write(&nommu_region_sem);
1383 delete_nommu_region(vma->vm_region);
1384 if (new_below) {
1385 vma->vm_region->vm_start = vma->vm_start = addr;
1386 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1387 } else {
1388 vma->vm_region->vm_end = vma->vm_end = addr;
1389 vma->vm_region->vm_top = addr;
1390 }
1391 add_nommu_region(vma->vm_region);
1392 add_nommu_region(new->vm_region);
1393 up_write(&nommu_region_sem);
1394 add_vma_to_mm(mm, vma);
1395 add_vma_to_mm(mm, new);
1396 return 0;
1397 }

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
1403 static int shrink_vma(struct mm_struct *mm,
1404 struct vm_area_struct *vma,
1405 unsigned long from, unsigned long to)
1406 {
1407 struct vm_region *region;
1408
1409
1410
1411 delete_vma_from_mm(vma);
1412 if (from > vma->vm_start)
1413 vma->vm_end = from;
1414 else
1415 vma->vm_start = to;
1416 add_vma_to_mm(mm, vma);
1417
1418
1419 region = vma->vm_region;
1420 BUG_ON(region->vm_usage != 1);
1421
1422 down_write(&nommu_region_sem);
1423 delete_nommu_region(region);
1424 if (from > region->vm_start) {
1425 to = region->vm_top;
1426 region->vm_top = region->vm_end = from;
1427 } else {
1428 region->vm_start = to;
1429 }
1430 add_nommu_region(region);
1431 up_write(&nommu_region_sem);
1432
1433 free_page_series(from, to);
1434 return 0;
1435 }

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a
 *   single VMA, though it need not cover the whole VMA
 */
1442 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1443 {
1444 struct vm_area_struct *vma;
1445 unsigned long end;
1446 int ret;
1447
1448 len = PAGE_ALIGN(len);
1449 if (len == 0)
1450 return -EINVAL;
1451
1452 end = start + len;
1453
1454
1455 vma = find_vma(mm, start);
1456 if (!vma) {
1457 static int limit;
1458 if (limit < 5) {
1459 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1460 current->pid, current->comm,
1461 start, start + len - 1);
1462 limit++;
1463 }
1464 return -EINVAL;
1465 }
1466
1467
1468 if (vma->vm_file) {
1469 do {
1470 if (start > vma->vm_start)
1471 return -EINVAL;
1472 if (end == vma->vm_end)
1473 goto erase_whole_vma;
1474 vma = vma->vm_next;
1475 } while (vma);
1476 return -EINVAL;
1477 } else {
1478
1479 if (start == vma->vm_start && end == vma->vm_end)
1480 goto erase_whole_vma;
1481 if (start < vma->vm_start || end > vma->vm_end)
1482 return -EINVAL;
1483 if (offset_in_page(start))
1484 return -EINVAL;
1485 if (end != vma->vm_end && offset_in_page(end))
1486 return -EINVAL;
1487 if (start != vma->vm_start && end != vma->vm_end) {
1488 ret = split_vma(mm, vma, start, 1);
1489 if (ret < 0)
1490 return ret;
1491 }
1492 return shrink_vma(mm, vma, start, end);
1493 }
1494
1495 erase_whole_vma:
1496 delete_vma_from_mm(vma);
1497 delete_vma(mm, vma);
1498 return 0;
1499 }
1500
1501 int vm_munmap(unsigned long addr, size_t len)
1502 {
1503 struct mm_struct *mm = current->mm;
1504 int ret;
1505
1506 mmap_write_lock(mm);
1507 ret = do_munmap(mm, addr, len, NULL);
1508 mmap_write_unlock(mm);
1509 return ret;
1510 }
1511 EXPORT_SYMBOL(vm_munmap);
1512
1513 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1514 {
1515 return vm_munmap(addr, len);
1516 }

/*
 * release all the mappings made in a process's VM space
 */
1521 void exit_mmap(struct mm_struct *mm)
1522 {
1523 struct vm_area_struct *vma;
1524
1525 if (!mm)
1526 return;
1527
1528 mm->total_vm = 0;
1529
1530 while ((vma = mm->mmap)) {
1531 mm->mmap = vma->vm_next;
1532 delete_vma_from_mm(vma);
1533 delete_vma(mm, vma);
1534 cond_resched();
1535 }
1536 }
1537
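/*
 * vm_brk() cannot grow an anonymous mapping on NOMMU, so it always fails.
 */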
1538 int vm_brk(unsigned long addr, unsigned long len)
1539 {
1540 return -ENOMEM;
1541 }

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and
 * the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
1553 static unsigned long do_mremap(unsigned long addr,
1554 unsigned long old_len, unsigned long new_len,
1555 unsigned long flags, unsigned long new_addr)
1556 {
1557 struct vm_area_struct *vma;
1558
1559
1560 old_len = PAGE_ALIGN(old_len);
1561 new_len = PAGE_ALIGN(new_len);
1562 if (old_len == 0 || new_len == 0)
1563 return (unsigned long) -EINVAL;
1564
1565 if (offset_in_page(addr))
1566 return -EINVAL;
1567
1568 if (flags & MREMAP_FIXED && new_addr != addr)
1569 return (unsigned long) -EINVAL;
1570
1571 vma = find_vma_exact(current->mm, addr, old_len);
1572 if (!vma)
1573 return (unsigned long) -EINVAL;
1574
1575 if (vma->vm_end != vma->vm_start + old_len)
1576 return (unsigned long) -EFAULT;
1577
1578 if (vma->vm_flags & VM_MAYSHARE)
1579 return (unsigned long) -EPERM;
1580
1581 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1582 return (unsigned long) -ENOMEM;
1583
1584
1585 vma->vm_end = vma->vm_start + new_len;
1586 return vma->vm_start;
1587 }
1588
1589 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1590 unsigned long, new_len, unsigned long, flags,
1591 unsigned long, new_addr)
1592 {
1593 unsigned long ret;
1594
1595 mmap_write_lock(current->mm);
1596 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1597 mmap_write_unlock(current->mm);
1598 return ret;
1599 }
1600
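/*
 * There are no page tables to walk on NOMMU, so follow_page() has nothing
 * to return.
 */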
1601 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1602 unsigned int foll_flags)
1603 {
1604 return NULL;
1605 }
1606
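/*
 * With no page tables, "remapping" only works when the virtual address
 * already equals the physical address; all that is left to do is record
 * the VM_IO/VM_PFNMAP nature of the mapping in the VMA flags.
 */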
1607 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1608 unsigned long pfn, unsigned long size, pgprot_t prot)
1609 {
1610 if (addr != (pfn << PAGE_SHIFT))
1611 return -EINVAL;
1612
1613 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1614 return 0;
1615 }
1616 EXPORT_SYMBOL(remap_pfn_range);
1617
1618 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1619 {
1620 unsigned long pfn = start >> PAGE_SHIFT;
1621 unsigned long vm_len = vma->vm_end - vma->vm_start;
1622
1623 pfn += vma->vm_pgoff;
1624 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1625 }
1626 EXPORT_SYMBOL(vm_iomap_memory);
1627
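/*
 * Only buffers whose VMA was tagged VM_USERMAP (e.g. by vmalloc_user()) may
 * be remapped; the user VMA is simply pointed at the kernel buffer.
 */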
1628 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1629 unsigned long pgoff)
1630 {
1631 unsigned int size = vma->vm_end - vma->vm_start;
1632
1633 if (!(vma->vm_flags & VM_USERMAP))
1634 return -EINVAL;
1635
1636 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1637 vma->vm_end = vma->vm_start + size;
1638
1639 return 0;
1640 }
1641 EXPORT_SYMBOL(remap_vmalloc_range);
1642
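/*
 * Page faults never occur on NOMMU, so the fault handlers should be
 * unreachable.
 */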
1643 vm_fault_t filemap_fault(struct vm_fault *vmf)
1644 {
1645 BUG();
1646 return 0;
1647 }
1648 EXPORT_SYMBOL(filemap_fault);
1649
1650 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1651 pgoff_t start_pgoff, pgoff_t end_pgoff)
1652 {
1653 BUG();
1654 return 0;
1655 }
1656 EXPORT_SYMBOL(filemap_map_pages);
1657
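/*
 * Access another process' address space directly: with no MMU the target
 * memory is addressable from the kernel, so only the bytes covered by a
 * single VMA (and permitted by its VM_MAYREAD/VM_MAYWRITE flags) are copied.
 */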
1658 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
1659 int len, unsigned int gup_flags)
1660 {
1661 struct vm_area_struct *vma;
1662 int write = gup_flags & FOLL_WRITE;
1663
1664 if (mmap_read_lock_killable(mm))
1665 return 0;
1666
1667
1668 vma = find_vma(mm, addr);
1669 if (vma) {
1670
1671 if (addr + len >= vma->vm_end)
1672 len = vma->vm_end - addr;
1673
1674
1675 if (write && vma->vm_flags & VM_MAYWRITE)
1676 copy_to_user_page(vma, NULL, addr,
1677 (void *) addr, buf, len);
1678 else if (!write && vma->vm_flags & VM_MAYREAD)
1679 copy_from_user_page(vma, NULL, addr,
1680 buf, (void *) addr, len);
1681 else
1682 len = 0;
1683 } else {
1684 len = 0;
1685 }
1686
1687 mmap_read_unlock(mm);
1688
1689 return len;
1690 }

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
1702 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1703 void *buf, int len, unsigned int gup_flags)
1704 {
1705 return __access_remote_vm(mm, addr, buf, len, gup_flags);
1706 }

/*
 * Access another process' address space
 * - source/target buffer must be kernel space
 */
1712 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1713 unsigned int gup_flags)
1714 {
1715 struct mm_struct *mm;
1716
1717 if (addr + len < addr)
1718 return 0;
1719
1720 mm = get_task_mm(tsk);
1721 if (!mm)
1722 return 0;
1723
1724 len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1725
1726 mmput(mm);
1727 return len;
1728 }
1729 EXPORT_SYMBOL_GPL(access_process_vm);

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * private regions in the backing file which are still in use.
 */
1742 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1743 size_t newsize)
1744 {
1745 struct vm_area_struct *vma;
1746 struct vm_region *region;
1747 pgoff_t low, high;
1748 size_t r_size, r_top;
1749
1750 low = newsize >> PAGE_SHIFT;
1751 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1752
1753 down_write(&nommu_region_sem);
1754 i_mmap_lock_read(inode->i_mapping);
1755
1756
1757 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1758
1759
1760 if (vma->vm_flags & VM_SHARED) {
1761 i_mmap_unlock_read(inode->i_mapping);
1762 up_write(&nommu_region_sem);
1763 return -ETXTBSY;
1764 }
1765 }
1766
1767
1768
1769
1770
1771
1772
1773 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1774 if (!(vma->vm_flags & VM_SHARED))
1775 continue;
1776
1777 region = vma->vm_region;
1778 r_size = region->vm_top - region->vm_start;
1779 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1780
1781 if (r_top > newsize) {
1782 region->vm_top -= r_top - newsize;
1783 if (region->vm_end > region->vm_top)
1784 region->vm_end = region->vm_top;
1785 }
1786 }
1787
1788 i_mmap_unlock_read(inode->i_mapping);
1789 up_write(&nommu_region_sem);
1790 return 0;
1791 }

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OOM_DISABLE mode.
 *
 * The default value is min(3% of free memory, 128MB); 128MB is enough to
 * recover with sshd/login, bash, and top/kill.
 */
1803 static int __meminit init_user_reserve(void)
1804 {
1805 unsigned long free_kbytes;
1806
1807 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1808
1809 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1810 return 0;
1811 }
1812 subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * The default value is min(3% of free memory, 8MB).
 */
1824 static int __meminit init_admin_reserve(void)
1825 {
1826 unsigned long free_kbytes;
1827
1828 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1829
1830 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1831 return 0;
1832 }
1833 subsys_initcall(init_admin_reserve);