// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const
 * and must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise
 * fall back to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
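
/*
 * Example (illustrative sketch, not part of this file): pairing
 * kstrdup_const() with kfree_const() for a name that is often a string
 * literal.  "struct demo" and "demo_set_name" are hypothetical.
 *
 *	struct demo {
 *		const char *name;
 *	};
 *
 *	static int demo_set_name(struct demo *d, const char *name)
 *	{
 *		const char *copy = kstrdup_const(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree_const(d->name);
 *		d->name = copy;
 *		return 0;
 *	}
 *
 * A literal passed in costs no allocation at all; a heap string is
 * duplicated and later freed, and kfree_const() handles both cases.
 */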

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
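
/*
 * Example (illustrative sketch): copying a fixed-size blob from user
 * space in an ioctl-style handler; "demo_handle_write" and its caller
 * are hypothetical.  Note the IS_ERR()/PTR_ERR() convention:
 * memdup_user() never returns NULL.
 *
 *	static long demo_handle_write(void __user *uarg, size_t len)
 *	{
 *		void *buf = memdup_user(uarg, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		// ... consume buf ...
 *		kfree(buf);
 *		return 0;
 *	}
 */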

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may be not
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
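
/*
 * Example (illustrative sketch): duplicating a NUL-terminated string
 * handed in from user space; "DEMO_NAME_MAX" and "uname" are
 * hypothetical.  The result is always NUL-terminated and must be
 * checked with IS_ERR(), never against NULL.
 *
 *	char *name = strndup_user(uname, DEMO_NAME_MAX);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	pr_info("got name: %s\n", name);
 *	kfree(name);
 */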

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
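
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) the default
 * STACK_RND_MASK is 0x7ff, so the random offset is at most
 * 0x7ff << 12 = 0x7ff000 bytes, i.e. the stack top moves within an
 * (almost) 8 MiB window below the page-aligned @stack_top, or above
 * it for CONFIG_STACK_GROWSUP architectures.
 */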

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
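
/*
 * Example (illustrative sketch): a driver charging pinned pages against
 * RLIMIT_MEMLOCK before pinning them, and undoing the charge if the pin
 * fails.  "npages" and "demo_pin_pages" are hypothetical.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = demo_pin_pages(...);
 *	if (ret)
 *		account_locked_vm(current->mm, npages, false);
 *
 * The same "false" call is made again at teardown once the pages are
 * unpinned, keeping locked_vm balanced.
 */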

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
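
/*
 * Example (illustrative sketch): allocating a buffer whose size is
 * chosen at runtime and need not be physically contiguous.  kvfree()
 * works for both the kmalloc and the vmalloc outcome, so the caller
 * does not need to remember which path was taken.
 *
 *	void *buf = kvmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	kvfree(buf);
 */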

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
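
/*
 * Example (illustrative sketch): doubling a kvmalloc'ed buffer.  On
 * failure kvrealloc() returns NULL and leaves the old buffer untouched
 * and still owned by the caller; on success the old buffer has been
 * freed, so only the returned pointer may be used afterwards.
 *
 *	new = kvrealloc(buf, size, 2 * size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// buf is still valid here
 *	buf = new;
 *	size *= 2;
 */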

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
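
/*
 * Example (illustrative sketch): a zeroed table too large for kmalloc,
 * with the element-count multiplication checked for overflow by
 * vcalloc() itself; "struct demo_entry" and "nr_entries" are
 * hypothetical.
 *
 *	struct demo_entry *tbl = vcalloc(nr_entries, sizeof(*tbl));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	// ... populate tbl ...
 *	vfree(tbl);
 */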

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
	int i, compound, nr, ret;

	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;

	compound = folio_entire_mapcount(folio);
	nr = folio_nr_pages(folio);
	if (folio_test_hugetlb(folio))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!folio_test_anon(folio))
		return ret - compound * nr;
	if (folio_test_double_map(folio))
		ret -= nr;
	return ret;
}

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER".
	 *
	 * A write is therefore done in two steps: validate the new value
	 * into a local variable first, sync the percpu counters if we are
	 * switching to OVERCOMMIT_NEVER, and only then publish the policy,
	 * so no reader observes the strict policy before the counter has
	 * been made precise.
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
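
/*
 * Worked example: on a machine with 8 GiB of RAM (no hugetlb pages),
 * 2 GiB of swap and the default overcommit_ratio of 50, the
 * OVERCOMMIT_NEVER limit is 8 GiB * 50% + 2 GiB = 6 GiB, expressed in
 * pages.  Setting vm.overcommit_kbytes replaces the ratio-based term
 * entirely; only the swap term is still added on top.
 */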

/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux deals
 * with hypervisors that emulate memory overcommitment.
 *
 * vm_memory_committed() returns a precise (and therefore more expensive)
 * sum of the per-CPU counter, unlike the cheap but approximate
 * percpu_counter_read_positive() used on the fast path in
 * __vm_enough_memory().
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif